diff --git a/docs/sources/_index.md b/docs/sources/_index.md
index 3428c662cd50f..822c01fd6a7ce 100644
--- a/docs/sources/_index.md
+++ b/docs/sources/_index.md
@@ -1,19 +1,48 @@
 ---
-title: Grafana Loki documentation
-description: "Technical documentation for Grafana Loki"
+title: Grafana Loki
+description: Grafana Loki is a set of open source components that can be composed into a fully featured logging stack.
 aliases:
   - /docs/loki/
 weight: 100
+hero:
+  title: Grafana Loki
+  level: 1
+  image: /media/docs/loki/logo-grafana-loki.png
+  width: 110
+  height: 110
+  description: Grafana Loki is a set of open source components that can be composed into a fully featured logging stack. A small index and highly compressed chunks simplify the operation and significantly lower the cost of Loki.
+cards:
+  title_class: pt-0 lh-1
+  items:
+    - title: Learn about Loki
+      href: /docs/loki/latest/get-started/
+      description: Learn about the Loki architecture and components, the various deployment modes, and best practices for labels.
+    - title: Set up Loki
+      href: /docs/loki/latest/setup/
+      description: View instructions for how to configure and install Loki, migrate from previous deployments, and upgrade your Loki environment.
+    - title: Configure Loki
+      href: /docs/loki/latest/configure/
+      description: View the Loki configuration reference and configuration examples.
+    - title: Send logs to Loki
+      href: /docs/loki/latest/send-data/
+      description: Select one or more clients to use to send your logs to Loki.
+    - title: Manage Loki
+      href: /docs/loki/latest/operations/
+      description: Learn how to manage tenants, log ingestion, storage, queries, and more.
+    - title: Query with LogQL
+      href: /docs/loki/latest/query/
+      description: Inspired by PromQL, LogQL is Grafana Loki’s query language. LogQL uses labels and operators for filtering.
 ---
 
-# Grafana Loki documentation
+{{< docs/hero-simple key="hero" >}}
 
-<img … alt="Loki Logo" …>
+---
 
-Grafana Loki is a set of components that can be composed into a fully featured logging stack.
+## Overview
 
-Unlike other logging systems, Loki is built around the idea of only indexing metadata about your logs: labels (just like Prometheus labels).
+Unlike other logging systems, Loki is built around the idea of only indexing metadata about your logs: labels (just like Prometheus labels). Log data itself is then compressed and stored in chunks in object stores such as Amazon Simple Storage Service (S3) or Google Cloud Storage (GCS), or even locally on the filesystem.
-A small index and highly compressed chunks simplifies the operation and significantly lowers the cost of Loki.
-For more information, see the [Loki overview]({{< relref "./get-started/overview" >}}).
+
+## Explore
+
+{{< card-grid key="cards" type="simple" >}}
diff --git a/docs/sources/setup/install/helm/install-microservices/_index.md b/docs/sources/setup/install/helm/install-microservices/_index.md
index 71f94673fe53c..9e0eb4d3307e6 100644
--- a/docs/sources/setup/install/helm/install-microservices/_index.md
+++ b/docs/sources/setup/install/helm/install-microservices/_index.md
@@ -48,73 +48,73 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu
 3. Create the configuration file `values.yaml`. The example below illustrates how to deploy Loki in test mode using MinIO as storage:
 
    ```yaml
- loki: - schemaConfig: - configs: - - from: 2024-04-01 - store: tsdb - object_store: s3 - schema: v13 - index: - prefix: loki_index_ - period: 24h - ingester: - chunk_encoding: snappy - tracing: - enabled: true - querier: - # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing - max_concurrent: 4 - - #gateway: - # ingress: - # enabled: true - # hosts: - # - host: FIXME - # paths: - # - path: / - # pathType: Prefix - - deploymentMode: Distributed - - ingester: - replicas: 3 - querier: - replicas: 3 - maxUnavailable: 2 - queryFrontend: - replicas: 2 - maxUnavailable: 1 - queryScheduler: - replicas: 2 - distributor: - replicas: 3 - maxUnavailable: 2 - compactor: - replicas: 1 - indexGateway: - replicas: 2 - maxUnavailable: 1 - - bloomCompactor: - replicas: 0 - bloomGateway: - replicas: 0 - - # Enable minio for storage - minio: - enabled: true - - # Zero out replica counts of other deployment modes - backend: - replicas: 0 - read: - replicas: 0 - write: - replicas: 0 - - singleBinary: - replicas: 0
+ loki: + schemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing + max_concurrent: 4 + + #gateway: + # ingress: + # enabled: true + # hosts: + # - host: FIXME + # paths: + # - path: / + # pathType: Prefix + + deploymentMode: Distributed + + ingester: + replicas: 3 + querier: + replicas: 3 + maxUnavailable: 2 + queryFrontend: + replicas: 2 + maxUnavailable: 1 + queryScheduler: + replicas: 2 + distributor: + replicas: 3 + maxUnavailable: 2 + compactor: + replicas: 1 + indexGateway: + replicas: 2 + maxUnavailable: 1 + + bloomCompactor: + replicas: 0 + bloomGateway: + replicas: 0 + + # Enable minio for storage + minio: + enabled: true + + # Zero out replica counts of other deployment modes + backend: + replicas: 0 + read: + replicas: 0 + write: + replicas: 0 + + singleBinary: + replicas: 0
   ```
 
4. Install or upgrade the Loki deployment. 
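A note on the `querier.max_concurrent` setting in the values above: it caps how many subqueries a single querier works on at once, which is why the comment ties it to available memory and CPU. A minimal sketch of that idea using a plain counting semaphore follows; the names and the worker body are illustrative assumptions, not Loki's actual scheduler.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxConcurrent = 4 // mirrors querier.max_concurrent in the values.yaml above
	sem := make(chan struct{}, maxConcurrent)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire one of maxConcurrent slots
			defer func() { <-sem }() // release the slot when done
			fmt.Printf("subquery %d running\n", id)
		}(i)
	}
	wg.Wait()
}
```

Raising the limit trades memory for parallelism; lowering it is the usual first response to querier OOMs.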
diff --git a/docs/sources/setup/install/helm/install-scalable/_index.md b/docs/sources/setup/install/helm/install-scalable/_index.md index e27f544b28f0c..fed56e339d969 100644 --- a/docs/sources/setup/install/helm/install-scalable/_index.md +++ b/docs/sources/setup/install/helm/install-scalable/_index.md @@ -50,68 +50,68 @@ It is not recommended to run scalable mode with `filesystem` storage. For the pu 3. Create the configuration file `values.yaml`. The example below illustrates how to deploy Loki in test mode using MinIO as storage: ```yaml - loki: - schemaConfig: - configs: - - from: 2024-04-01 - store: tsdb - object_store: s3 - schema: v13 - index: - prefix: loki_index_ - period: 24h - ingester: - chunk_encoding: snappy - tracing: - enabled: true - querier: - # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing - max_concurrent: 4 - - #gateway: - # ingress: - # enabled: true - # hosts: - # - host: FIXME - # paths: - # - path: / - # pathType: Prefix - - deploymentMode: SimpleScalable - - backend: - replicas: 3 - read: - replicas: 3 - write: - replicas: 3 - - # Enable minio for storage - minio: - enabled: true - - # Zero out replica counts of other deployment modes - singleBinary: - replicas: 0 - - ingester: - replicas: 0 - querier: - replicas: 0 - queryFrontend: - replicas: 0 - queryScheduler: - replicas: 0 - distributor: - replicas: 0 - compactor: - replicas: 0 - indexGateway: - replicas: 0 - bloomCompactor: - replicas: 0 - bloomGateway: - replicas: 0 + loki: + schemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + # Default is 4, if you have enough memory and CPU you can increase, reduce if OOMing + max_concurrent: 4 + + #gateway: + # ingress: + # enabled: true + # hosts: + # - host: FIXME + # paths: + # - path: / + # pathType: Prefix + + deploymentMode: SimpleScalable + + backend: + replicas: 3 + read: + replicas: 3 + write: + replicas: 3 + + # Enable minio for storage + minio: + enabled: true + + # Zero out replica counts of other deployment modes + singleBinary: + replicas: 0 + + ingester: + replicas: 0 + querier: + replicas: 0 + queryFrontend: + replicas: 0 + queryScheduler: + replicas: 0 + distributor: + replicas: 0 + compactor: + replicas: 0 + indexGateway: + replicas: 0 + bloomCompactor: + replicas: 0 + bloomGateway: + replicas: 0 ``` 4. Install or upgrade the Loki deployment. 
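Before switching from MinIO to a real object store (the next hunk adds the S3 and Azure configurations), it can save a debugging round trip to confirm the bucket is reachable with the credentials Loki will use. This is a hedged sketch using aws-sdk-go-v2; the bucket name `chunks` is taken from the example values, and your region, credentials source, and bucket names may differ.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	// Loads region and credentials from the usual AWS env vars/profiles.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load aws config: %v", err)
	}
	client := s3.NewFromConfig(cfg)
	// HeadBucket fails fast on a missing bucket or insufficient permissions.
	if _, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String("chunks")}); err != nil {
		log.Fatalf("bucket not accessible: %v", err)
	}
	log.Println("bucket reachable")
}
```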
@@ -131,162 +131,162 @@ After testing Loki with MinIO, it is recommended to configure Loki with an objec {{< code >}} ```s3 - loki: - schemaConfig: - configs: - - from: 2024-04-01 - store: tsdb - object_store: s3 - schema: v13 - index: - prefix: loki_index_ - period: 24h - ingester: - chunk_encoding: snappy - tracing: - enabled: true - querier: - max_concurrent: 4 - - storage: - type: s3 - bucketNames: - chunks: "chunks" - ruler: "ruler" - admin: "admin" - s3: - # s3 URL can be used to specify the endpoint, access key, secret key, and bucket name - s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name - # AWS endpoint URL - endpoint: - # AWS region where the S3 bucket is located - region: - # AWS secret access key - secretAccessKey: - # AWS access key ID - accessKeyId: - # AWS signature version (e.g., v2 or v4) - signatureVersion: - # Forces the path style for S3 (true/false) - s3ForcePathStyle: false - # Allows insecure (HTTP) connections (true/false) - insecure: false - # HTTP configuration settings - http_config: {} - - deploymentMode: SimpleScalable - - backend: - replicas: 3 - read: - replicas: 3 - write: - replicas: 3 - - # Disable minio storage - minio: - enabled: false - - # Zero out replica counts of other deployment modes - singleBinary: - replicas: 0 - - ingester: - replicas: 0 - querier: - replicas: 0 - queryFrontend: - replicas: 0 - queryScheduler: - replicas: 0 - distributor: - replicas: 0 - compactor: - replicas: 0 - indexGateway: - replicas: 0 - bloomCompactor: - replicas: 0 - bloomGateway: - replicas: 0 +loki: + schemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: s3 + schema: v13 + index: + prefix: loki_index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + max_concurrent: 4 + + storage: + type: s3 + bucketNames: + chunks: "chunks" + ruler: "ruler" + admin: "admin" + s3: + # s3 URL can be used to specify the endpoint, access key, secret key, and bucket name + s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name + # AWS endpoint URL + endpoint: + # AWS region where the S3 bucket is located + region: + # AWS secret access key + secretAccessKey: + # AWS access key ID + accessKeyId: + # AWS signature version (e.g., v2 or v4) + signatureVersion: + # Forces the path style for S3 (true/false) + s3ForcePathStyle: false + # Allows insecure (HTTP) connections (true/false) + insecure: false + # HTTP configuration settings + http_config: {} + +deploymentMode: SimpleScalable + +backend: + replicas: 3 +read: + replicas: 3 +write: + replicas: 3 + +# Disable minio storage +minio: + enabled: false + +# Zero out replica counts of other deployment modes +singleBinary: + replicas: 0 + +ingester: + replicas: 0 +querier: + replicas: 0 +queryFrontend: + replicas: 0 +queryScheduler: + replicas: 0 +distributor: + replicas: 0 +compactor: + replicas: 0 +indexGateway: + replicas: 0 +bloomCompactor: + replicas: 0 +bloomGateway: + replicas: 0 ``` ```azure - loki: - schemaConfig: - configs: - - from: 2024-04-01 - store: tsdb - object_store: azure - schema: v13 - index: - prefix: loki_index_ - period: 24h - ingester: - chunk_encoding: snappy - tracing: - enabled: true - querier: - max_concurrent: 4 - - storage: - type: azure - azure: - # Name of the Azure Blob Storage account - accountName: - # Key associated with the Azure Blob Storage account - accountKey: - # Comprehensive connection string for Azure Blob Storage account (Can be used to replace endpoint, accountName, and accountKey) - connectionString: - # Flag 
indicating whether to use Azure Managed Identity for authentication - useManagedIdentity: false - # Flag indicating whether to use a federated token for authentication - useFederatedToken: false - # Client ID of the user-assigned managed identity (if applicable) - userAssignedId: - # Timeout duration for requests made to the Azure Blob Storage account (in seconds) - requestTimeout: - # Domain suffix of the Azure Blob Storage service endpoint (e.g., core.windows.net) - endpointSuffix: - bucketNames: - chunks: "chunks" - ruler: "ruler" - admin: "admin" - - deploymentMode: SimpleScalable - - backend: - replicas: 3 - read: - replicas: 3 - write: - replicas: 3 - - # Disable minio storage - minio: - enabled: false - - # Zero out replica counts of other deployment modes - singleBinary: - replicas: 0 - - ingester: - replicas: 0 - querier: - replicas: 0 - queryFrontend: - replicas: 0 - queryScheduler: - replicas: 0 - distributor: - replicas: 0 - compactor: - replicas: 0 - indexGateway: - replicas: 0 - bloomCompactor: - replicas: 0 - bloomGateway: - replicas: 0 +loki: + schemaConfig: + configs: + - from: 2024-04-01 + store: tsdb + object_store: azure + schema: v13 + index: + prefix: loki_index_ + period: 24h + ingester: + chunk_encoding: snappy + tracing: + enabled: true + querier: + max_concurrent: 4 + + storage: + type: azure + azure: + # Name of the Azure Blob Storage account + accountName: + # Key associated with the Azure Blob Storage account + accountKey: + # Comprehensive connection string for Azure Blob Storage account (Can be used to replace endpoint, accountName, and accountKey) + connectionString: + # Flag indicating whether to use Azure Managed Identity for authentication + useManagedIdentity: false + # Flag indicating whether to use a federated token for authentication + useFederatedToken: false + # Client ID of the user-assigned managed identity (if applicable) + userAssignedId: + # Timeout duration for requests made to the Azure Blob Storage account (in seconds) + requestTimeout: + # Domain suffix of the Azure Blob Storage service endpoint (e.g., core.windows.net) + endpointSuffix: + bucketNames: + chunks: "chunks" + ruler: "ruler" + admin: "admin" + +deploymentMode: SimpleScalable + +backend: + replicas: 3 +read: + replicas: 3 +write: + replicas: 3 + +# Disable minio storage +minio: + enabled: false + +# Zero out replica counts of other deployment modes +singleBinary: + replicas: 0 + +ingester: + replicas: 0 +querier: + replicas: 0 +queryFrontend: + replicas: 0 +queryScheduler: + replicas: 0 +distributor: + replicas: 0 +compactor: + replicas: 0 +indexGateway: + replicas: 0 +bloomCompactor: + replicas: 0 +bloomGateway: + replicas: 0 ``` {{< /code >}} diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index cae0094873a84..b287bdea5f37f 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -2752,7 +2752,23 @@ lifecycler: # CLI flag: -ingester.flush-check-period [flush_check_period: | default = 30s] -# The timeout before a flush is cancelled. +flush_op_backoff: + # Minimum backoff period when a flush fails. Each concurrent flush has its own + # backoff, see `ingester.concurrent-flushes`. + # CLI flag: -ingester.flush-op-backoff-min-period + [min_period: | default = 10s] + + # Maximum backoff period when a flush fails. Each concurrent flush has its own + # backoff, see `ingester.concurrent-flushes`. 
+ # CLI flag: -ingester.flush-op-backoff-max-period + [max_period: | default = 1m] + + # Maximum retries for failed flushes. + # CLI flag: -ingester.flush-op-backoff-retries + [max_retries: | default = 10] + +# The timeout for an individual flush. Will be retried up to +# `flush-op-backoff-retries` times. # CLI flag: -ingester.flush-op-timeout [flush_op_timeout: | default = 10m] diff --git a/pkg/bloombuild/builder/batch.go b/pkg/bloombuild/builder/batch.go index 3ff52327b4c30..4b5fcdb00ad2e 100644 --- a/pkg/bloombuild/builder/batch.go +++ b/pkg/bloombuild/builder/batch.go @@ -168,9 +168,9 @@ func newBatchedBlockLoader( } // compiler checks -var _ v1.Iterator[*v1.SeriesWithBloom] = &blockLoadingIter{} -var _ v1.CloseableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{} -var _ v1.ResettableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{} +var _ v1.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ v1.CloseableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ v1.ResettableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} // TODO(chaudum): testware func newBlockLoadingIter(ctx context.Context, blocks []bloomshipper.BlockRef, fetcher FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier], batchSize int) *blockLoadingIter { @@ -196,13 +196,13 @@ type blockLoadingIter struct { // internals initialized bool err error - iter v1.Iterator[*v1.SeriesWithBloom] + iter v1.Iterator[*v1.SeriesWithBlooms] loader *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier] loaded map[io.Closer]struct{} } // At implements v1.Iterator. -func (i *blockLoadingIter) At() *v1.SeriesWithBloom { +func (i *blockLoadingIter) At() *v1.SeriesWithBlooms { if !i.initialized { panic("iterator not initialized") } @@ -229,7 +229,7 @@ func (i *blockLoadingIter) init() { i.overlapping = overlappingBlocksIter(i.inputs) // set initial iter - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() // set "match all" filter function if not present if i.filter == nil { @@ -249,14 +249,14 @@ func (i *blockLoadingIter) loadNext() bool { loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize) filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter) - iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs)) + iters := make([]v1.PeekingIterator[*v1.SeriesWithBlooms], 0, len(blockRefs)) for filtered.Next() { bq := filtered.At() i.loaded[bq] = struct{}{} iter, err := bq.SeriesIter() if err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() return false } iters = append(iters, iter) @@ -264,7 +264,7 @@ func (i *blockLoadingIter) loadNext() bool { if err := filtered.Err(); err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() return false } @@ -278,12 +278,12 @@ func (i *blockLoadingIter) loadNext() bool { // two overlapping blocks can conceivably have the same series, so we need to dedupe, // preferring the one with the most chunks already indexed since we'll have // to add fewer chunks to the bloom - i.iter = v1.NewDedupingIter[*v1.SeriesWithBloom, *v1.SeriesWithBloom]( - func(a, b *v1.SeriesWithBloom) bool { + i.iter = v1.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms]( + func(a, b *v1.SeriesWithBlooms) bool { return a.Series.Fingerprint == b.Series.Fingerprint }, - 
v1.Identity[*v1.SeriesWithBloom], - func(a, b *v1.SeriesWithBloom) *v1.SeriesWithBloom { + v1.Identity[*v1.SeriesWithBlooms], + func(a, b *v1.SeriesWithBlooms) *v1.SeriesWithBlooms { if len(a.Series.Chunks) > len(b.Series.Chunks) { return a } @@ -294,7 +294,7 @@ func (i *blockLoadingIter) loadNext() bool { return i.iter.Next() } - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() i.err = i.overlapping.Err() return false } diff --git a/pkg/bloombuild/builder/batch_test.go b/pkg/bloombuild/builder/batch_test.go index b2616a37dc1ec..19de5354fb14b 100644 --- a/pkg/bloombuild/builder/batch_test.go +++ b/pkg/bloombuild/builder/batch_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" @@ -208,3 +209,12 @@ func TestOverlappingBlocksIter(t *testing.T) { }) } } + +func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef { + bounds := v1.NewBounds(min, max) + return bloomshipper.BlockRef{ + Ref: bloomshipper.Ref{ + Bounds: bounds, + }, + } +} diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go index 3a6d6ce4e1532..cbbd737a83190 100644 --- a/pkg/bloombuild/builder/builder.go +++ b/pkg/bloombuild/builder/builder.go @@ -368,7 +368,7 @@ func (b *Builder) loadWorkForGap( tenant string, id tsdb.Identifier, gap protos.GapWithBlocks, -) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBloom], error) { +) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBlooms], error) { // load a series iterator for the gap seriesItr, err := b.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.Bounds) if err != nil { diff --git a/pkg/bloombuild/builder/spec.go b/pkg/bloombuild/builder/spec.go index a56918b0344de..284c0c6d7fc44 100644 --- a/pkg/bloombuild/builder/spec.go +++ b/pkg/bloombuild/builder/spec.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -45,7 +44,7 @@ type SimpleBloomGenerator struct { userID string store v1.Iterator[*v1.Series] chunkLoader ChunkLoader - blocksIter v1.ResettableIterator[*v1.SeriesWithBloom] + blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms] // options to build blocks with opts v1.BlockOptions @@ -68,7 +67,7 @@ func NewSimpleBloomGenerator( opts v1.BlockOptions, store v1.Iterator[*v1.Series], chunkLoader ChunkLoader, - blocksIter v1.ResettableIterator[*v1.SeriesWithBloom], + blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms], readWriterFn func() (v1.BlockWriter, v1.BlockReader), reporter func(model.Fingerprint), metrics *Metrics, @@ -98,44 +97,30 @@ func NewSimpleBloomGenerator( } } -func (s *SimpleBloomGenerator) populator(ctx context.Context) func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) { - return func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) { - start := time.Now() +func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorFunc { + return func( + series *v1.Series, + srcBlooms v1.SizedIterator[*v1.Bloom], + toAdd v1.ChunkRefs, + ch chan *v1.BloomCreation, + ) { level.Debug(s.logger).Log( "msg", "populating bloom filter", "stage", "before", "fp", series.Fingerprint, "chunks", len(series.Chunks), ) - chunkItersWithFP, err := s.chunkLoader.Load(ctx, s.userID, series) - if err != nil { - return 0, false, errors.Wrapf(err, "failed to load chunks for series: %+v", series) - } - - bytesAdded, skip, err := 
s.tokenizer.Populate( - &v1.SeriesWithBloom{ - Series: series, - Bloom: bloom, - }, - chunkItersWithFP.itr, - ) + chunkItersWithFP := s.chunkLoader.Load(ctx, s.userID, &v1.Series{ + Fingerprint: series.Fingerprint, + Chunks: toAdd, + }) - level.Debug(s.logger).Log( - "msg", "populating bloom filter", - "stage", "after", - "fp", series.Fingerprint, - "chunks", len(series.Chunks), - "series_bytes", bytesAdded, - "duration", time.Since(start), - "err", err, - ) + s.tokenizer.Populate(srcBlooms, chunkItersWithFP.itr, ch) if s.reporter != nil { s.reporter(series.Fingerprint) } - return bytesAdded, skip, err } - } func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIterator { @@ -179,10 +164,10 @@ type LazyBlockBuilderIterator struct { ctx context.Context opts v1.BlockOptions metrics *Metrics - populate func(*v1.Series, *v1.Bloom) (int, bool, error) + populate v1.BloomPopulatorFunc readWriterFn func() (v1.BlockWriter, v1.BlockReader) series v1.PeekingIterator[*v1.Series] - blocks v1.ResettableIterator[*v1.SeriesWithBloom] + blocks v1.ResettableIterator[*v1.SeriesWithBlooms] bytesAdded int curr *v1.Block @@ -193,10 +178,10 @@ func NewLazyBlockBuilderIterator( ctx context.Context, opts v1.BlockOptions, metrics *Metrics, - populate func(*v1.Series, *v1.Bloom) (int, bool, error), + populate v1.BloomPopulatorFunc, readWriterFn func() (v1.BlockWriter, v1.BlockReader), series v1.PeekingIterator[*v1.Series], - blocks v1.ResettableIterator[*v1.SeriesWithBloom], + blocks v1.ResettableIterator[*v1.SeriesWithBlooms], ) *LazyBlockBuilderIterator { return &LazyBlockBuilderIterator{ ctx: ctx, @@ -270,7 +255,7 @@ type ChunkItersByFingerprint struct { // ChunkLoader loads chunks from a store type ChunkLoader interface { - Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error) + Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint } // StoreChunkLoader loads chunks from a store @@ -286,7 +271,7 @@ func NewStoreChunkLoader(fetcherProvider stores.ChunkFetcherProvider, metrics *M } } -func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error) { +func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint { // NB(owen-d): This is probably unnecessary as we should only have one fetcher // because we'll only be working on a single index period at a time, but this should protect // us in the case of refactoring/changing this and likely isn't a perf bottleneck. 
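The `newBatchedChunkLoader` call just below follows the same batching pattern as `newBatchedBlockLoader` in batch.go: work through the inputs a fixed-size batch at a time, so only `batchSize` expensive resources (fetched chunks, open block queriers) are held at once. A standalone sketch of that idea, with a hypothetical helper rather than the package's actual implementation:

```go
package main

import "fmt"

// batches splits in into consecutive slices of at most size elements.
func batches[T any](in []T, size int) [][]T {
	var out [][]T
	for len(in) > size {
		out = append(out, in[:size])
		in = in[size:]
	}
	if len(in) > 0 {
		out = append(out, in)
	}
	return out
}

func main() {
	refs := []string{"block-1", "block-2", "block-3", "block-4", "block-5"}
	for _, batch := range batches(refs, 2) {
		// In the real loaders each batch is fetched, iterated, and closed
		// before the next batch is opened.
		fmt.Println(batch)
	}
}
```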
@@ -317,5 +302,5 @@ func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.S return &ChunkItersByFingerprint{ fp: series.Fingerprint, itr: newBatchedChunkLoader(ctx, fetchers, inputs, s.metrics, batchedLoaderDefaultBatchSize), - }, nil + } } diff --git a/pkg/bloombuild/builder/spec_test.go b/pkg/bloombuild/builder/spec_test.go index 40225dc45865b..e6b47b1442a6e 100644 --- a/pkg/bloombuild/builder/spec_test.go +++ b/pkg/bloombuild/builder/spec_test.go @@ -15,19 +15,19 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" ) -func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) { +func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) { return blocksFromSchemaWithRange(t, n, options, 0, 0xffff) } // splits 100 series across `n` non-overlapping blocks. // uses options to build blocks with. -func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) { +func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) { if 100%n != 0 { panic("100 series must be evenly divisible by n") } numSeries := 100 - data, _ = v1.MkBasicSeriesWithBlooms(numSeries, 0, fromFP, throughFp, 0, 10000) + data, _ = v1.MkBasicSeriesWithBlooms(numSeries, fromFP, throughFp, 0, 10000) seriesPerBlock := numSeries / n @@ -46,7 +46,7 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock - itr := v1.NewSliceIter[v1.SeriesWithBloom](data[minIdx:maxIdx]) + itr := v1.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx]) _, err = builder.BuildFrom(itr) require.Nil(t, err) @@ -62,11 +62,11 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro // doesn't actually load any chunks type dummyChunkLoader struct{} -func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) (*ChunkItersByFingerprint, error) { +func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) *ChunkItersByFingerprint { return &ChunkItersByFingerprint{ fp: series.Fingerprint, itr: v1.NewEmptyIter[v1.ChunkRefWithIter](), - }, nil + } } func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator { @@ -132,9 +132,9 @@ func TestSimpleBloomGenerator(t *testing.T) { } { t.Run(fmt.Sprintf("%s/%s", tc.desc, enc), func(t *testing.T) { sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff) - storeItr := v1.NewMapIter[v1.SeriesWithBloom, *v1.Series]( - v1.NewSliceIter[v1.SeriesWithBloom](data), - func(swb v1.SeriesWithBloom) *v1.Series { + storeItr := v1.NewMapIter[v1.SeriesWithBlooms, *v1.Series]( + v1.NewSliceIter[v1.SeriesWithBlooms](data), + func(swb v1.SeriesWithBlooms) *v1.Series { return swb.Series }, ) @@ -150,9 +150,9 @@ func TestSimpleBloomGenerator(t *testing.T) { // Check all the input series are present in the output blocks. 
expectedRefs := v1.PointerSlice(data) - outputRefs := make([]*v1.SeriesWithBloom, 0, len(data)) + outputRefs := make([]*v1.SeriesWithBlooms, 0, len(data)) for _, block := range outputBlocks { - bq := v1.NewBlockQuerier(block, false, v1.DefaultMaxPageSize) + bq := v1.NewBlockQuerier(block, false, v1.DefaultMaxPageSize).Iter() for bq.Next() { outputRefs = append(outputRefs, bq.At()) } @@ -164,13 +164,5 @@ func TestSimpleBloomGenerator(t *testing.T) { }) } } -} -func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef { - bounds := v1.NewBounds(min, max) - return bloomshipper.BlockRef{ - Ref: bloomshipper.Ref{ - Bounds: bounds, - }, - } } diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go index 3f68ab5206303..165f530f1709c 100644 --- a/pkg/bloombuild/planner/metrics.go +++ b/pkg/bloombuild/planner/metrics.go @@ -32,6 +32,9 @@ type Metrics struct { buildCompleted *prometheus.CounterVec buildTime *prometheus.HistogramVec + blocksDeleted prometheus.Counter + metasDeleted prometheus.Counter + tenantsDiscovered prometheus.Counter } @@ -107,6 +110,19 @@ func NewMetrics( Buckets: prometheus.DefBuckets, }, []string{"status"}), + blocksDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "blocks_deleted_total", + Help: "Number of blocks deleted", + }), + metasDeleted: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "metas_deleted_total", + Help: "Number of metas deleted", + }), + tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, Subsystem: metricsSubsystem, diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index 287a859745f5a..ea2ea5db531b2 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -3,6 +3,7 @@ package planner import ( "context" "fmt" + "math" "sort" "sync" "time" @@ -156,6 +157,17 @@ func (p *Planner) running(ctx context.Context) error { } } +type tenantTableTaskResults struct { + tasksToWait int + originalMetas []bloomshipper.Meta + resultsCh chan *protos.TaskResult +} + +type tenantTable struct { + table config.DayTable + tenant string +} + func (p *Planner) runOne(ctx context.Context) error { var ( start = time.Now() @@ -171,39 +183,79 @@ func (p *Planner) runOne(ctx context.Context) error { tables := p.tables(time.Now()) level.Debug(p.logger).Log("msg", "loaded tables", "tables", tables.TotalDays()) - work, err := p.loadWork(ctx, tables) + work, err := p.loadTenantWork(ctx, tables) if err != nil { return fmt.Errorf("error loading work: %w", err) } + // For deletion, we need to aggregate the results for each table and tenant tuple + // We cannot delete the returned tombstoned metas as soon as a task finishes since + // other tasks may still be using the now tombstoned metas + tasksResultForTenantTable := make(map[tenantTable]tenantTableTaskResults) var totalTasks int - for _, w := range work { - logger := log.With(p.logger, "tenant", w.tenant, "table", w.table.Addr(), "ownership", w.ownershipRange.String()) - gaps, err := p.findGapsForBounds(ctx, w.tenant, w.table, w.ownershipRange) - if err != nil { - level.Error(logger).Log("msg", "error finding gaps", "err", err) - continue - } + for table, tenants := range work { + for tenant, ownershipRanges := range tenants { + logger := log.With(p.logger, "tenant", tenant, "table", table.Addr()) + tt := tenantTable{ + tenant: tenant, + 
table: table, + } - now := time.Now() - for _, gap := range gaps { - totalTasks++ + tasks, existingMetas, err := p.computeTasks(ctx, table, tenant, ownershipRanges) + if err != nil { + level.Error(logger).Log("msg", "error computing tasks", "err", err) + continue + } - task := NewTask( - ctx, now, - protos.NewTask(w.table, w.tenant, w.ownershipRange, gap.tsdb, gap.gaps), - ) + var tenantTableEnqueuedTasks int + resultsCh := make(chan *protos.TaskResult, len(tasks)) - if err := p.enqueueTask(task); err != nil { - level.Error(logger).Log("msg", "error enqueuing task", "err", err) - continue + now := time.Now() + for _, task := range tasks { + queueTask := NewQueueTask(ctx, now, task, resultsCh) + if err := p.enqueueTask(queueTask); err != nil { + level.Error(logger).Log("msg", "error enqueuing task", "err", err) + continue + } + + totalTasks++ + tenantTableEnqueuedTasks++ + } + + tasksResultForTenantTable[tt] = tenantTableTaskResults{ + tasksToWait: tenantTableEnqueuedTasks, + originalMetas: existingMetas, + resultsCh: resultsCh, } + + level.Debug(logger).Log("msg", "enqueued tasks", "tasks", tenantTableEnqueuedTasks) } } level.Debug(p.logger).Log("msg", "planning completed", "tasks", totalTasks) + // Create a goroutine to process the results for each table tenant tuple + // TODO(salvacorts): This may end up creating too many goroutines. + // Create a pool of workers to process table-tenant tuples. + var wg sync.WaitGroup + for tt, results := range tasksResultForTenantTable { + wg.Add(1) + go func(table config.DayTable, tenant string, results tenantTableTaskResults) { + defer wg.Done() + + if err := p.processTenantTaskResults( + ctx, table, tenant, + results.originalMetas, results.tasksToWait, results.resultsCh, + ); err != nil { + level.Error(p.logger).Log("msg", "failed to process tenant task results", "err", err) + } + }(tt.table, tt.tenant, results) + } + + level.Debug(p.logger).Log("msg", "waiting for all tasks to be completed", "tasks", totalTasks, "tenantTables", len(tasksResultForTenantTable)) + wg.Wait() + status = statusSuccess level.Info(p.logger).Log( "msg", "bloom build iteration completed", @@ -212,6 +264,177 @@ func (p *Planner) runOne(ctx context.Context) error { return nil } +// computeTasks computes the tasks for a given table and tenant and ownership range. +// It returns the tasks to be executed and the metas that are existing relevant for the ownership range. 
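+// The existing metas are also returned so that, once the new blocks and metas have been built, the caller can delete the ones that became outdated (see processTenantTaskResults below).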
+func (p *Planner) computeTasks( + ctx context.Context, + table config.DayTable, + tenant string, + ownershipRanges []v1.FingerprintBounds, +) ([]*protos.Task, []bloomshipper.Meta, error) { + var tasks []*protos.Task + logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant) + + // Fetch source metas to be used in both build and cleanup of out-of-date metas+blooms + metas, err := p.bloomStore.FetchMetas( + ctx, + bloomshipper.MetaSearchParams{ + TenantID: tenant, + Interval: bloomshipper.NewInterval(table.Bounds()), + Keyspace: v1.NewBounds(0, math.MaxUint64), + }, + ) + if err != nil { + return nil, nil, fmt.Errorf("failed to get metas: %w", err) + } + + for _, ownershipRange := range ownershipRanges { + logger := log.With(logger, "ownership", ownershipRange.String()) + + // Filter only the metas that overlap in the ownership range + metasInBounds := bloomshipper.FilterMetasOverlappingBounds(metas, ownershipRange) + level.Debug(logger).Log("msg", "found relevant metas", "metas", len(metasInBounds)) + + // Find gaps in the TSDBs for this tenant/table + gaps, err := p.findOutdatedGaps(ctx, tenant, table, ownershipRange, metasInBounds, logger) + if err != nil { + level.Error(logger).Log("msg", "failed to find outdated gaps", "err", err) + continue + } + + for _, gap := range gaps { + tasks = append(tasks, protos.NewTask(table, tenant, ownershipRange, gap.tsdb, gap.gaps)) + } + } + + return tasks, metas, nil +} + +func (p *Planner) processTenantTaskResults( + ctx context.Context, + table config.DayTable, + tenant string, + originalMetas []bloomshipper.Meta, + totalTasks int, + resultsCh <-chan *protos.TaskResult, +) error { + logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant) + level.Debug(logger).Log("msg", "waiting for all tasks to be completed", "tasks", totalTasks) + + newMetas := make([]bloomshipper.Meta, 0, totalTasks) + for i := 0; i < totalTasks; i++ { + select { + case <-ctx.Done(): + if err := ctx.Err(); err != nil && !errors.Is(err, context.Canceled) { + level.Error(logger).Log("msg", "planner context done with error", "err", err) + return err + } + + // No error or context canceled, just return + level.Debug(logger).Log("msg", "context done while waiting for task results") + return nil + case result := <-resultsCh: + if result == nil { + level.Error(logger).Log("msg", "received nil task result") + continue + } + if result.Error != nil { + level.Error(logger).Log( + "msg", "task failed", + "err", result.Error, + "task", result.TaskID, + ) + continue + } + + newMetas = append(newMetas, result.CreatedMetas...) + } + } + + level.Debug(logger).Log( + "msg", "all tasks completed", + "tasks", totalTasks, + "originalMetas", len(originalMetas), + "newMetas", len(newMetas), + ) + + if len(newMetas) == 0 { + // No new metas were created, nothing to delete + // Note: this would only happen if all tasks failed + return nil + } + + combined := append(originalMetas, newMetas...) 
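+ // Note: append may write through to originalMetas' backing array, so callers must not reuse originalMetas after this point. A defensive copy (an assumption, not part of this change) would be: combined := append(append(make([]bloomshipper.Meta, 0, len(originalMetas)+len(newMetas)), originalMetas...), newMetas...)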
+ outdated := outdatedMetas(combined) + level.Debug(logger).Log("msg", "found outdated metas", "outdated", len(outdated)) + + if err := p.deleteOutdatedMetasAndBlocks(ctx, table, tenant, outdated); err != nil { + return fmt.Errorf("failed to delete outdated metas: %w", err) + } + + return nil +} + +func (p *Planner) deleteOutdatedMetasAndBlocks( + ctx context.Context, + table config.DayTable, + tenant string, + metas []bloomshipper.Meta, +) error { + logger := log.With(p.logger, "table", table.Addr(), "tenant", tenant) + + client, err := p.bloomStore.Client(table.ModelTime()) + if err != nil { + level.Error(logger).Log("msg", "failed to get client", "err", err) + return errors.Wrap(err, "failed to get client") + } + + var ( + deletedMetas int + deletedBlocks int + ) + defer func() { + p.metrics.metasDeleted.Add(float64(deletedMetas)) + p.metrics.blocksDeleted.Add(float64(deletedBlocks)) + }() + + for _, meta := range metas { + for _, block := range meta.Blocks { + if err := client.DeleteBlocks(ctx, []bloomshipper.BlockRef{block}); err != nil { + if client.IsObjectNotFoundErr(err) { + level.Debug(logger).Log("msg", "block not found while attempting delete, continuing", "block", block.String()) + } else { + level.Error(logger).Log("msg", "failed to delete block", "err", err, "block", block.String()) + return errors.Wrap(err, "failed to delete block") + } + } + + deletedBlocks++ + level.Debug(logger).Log("msg", "removed outdated block", "block", block.String()) + } + + err = client.DeleteMetas(ctx, []bloomshipper.MetaRef{meta.MetaRef}) + if err != nil { + if client.IsObjectNotFoundErr(err) { + level.Debug(logger).Log("msg", "meta not found while attempting delete, continuing", "meta", meta.MetaRef.String()) + } else { + level.Error(logger).Log("msg", "failed to delete meta", "err", err, "meta", meta.MetaRef.String()) + return errors.Wrap(err, "failed to delete meta") + } + } + deletedMetas++ + level.Debug(logger).Log("msg", "removed outdated meta", "meta", meta.MetaRef.String()) + } + + level.Debug(logger).Log( + "msg", "deleted outdated metas and blocks", + "metas", deletedMetas, + "blocks", deletedBlocks, + ) + + return nil +} + func (p *Planner) tables(ts time.Time) *dayRangeIterator { // adjust the minimum by one to make it inclusive, which is more intuitive // for a configuration variable @@ -228,21 +451,15 @@ func (p *Planner) tables(ts time.Time) *dayRangeIterator { return newDayRangeIterator(fromDay, throughDay, p.schemaCfg) } -type tenantTableRange struct { - tenant string - table config.DayTable - ownershipRange v1.FingerprintBounds +type work map[config.DayTable]map[string][]v1.FingerprintBounds - // TODO: Add tracking - //finished bool - //queueTime, startTime, endTime time.Time -} - -func (p *Planner) loadWork( +// loadTenantWork loads the work for each tenant and table tuple. +// work is the list of fingerprint ranges that need to be indexed in bloom filters. 
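+// Shape example (illustrative values only): work[table]["tenant-a"] = []v1.FingerprintBounds{v1.NewBounds(0, 0x7fff), v1.NewBounds(0x8000, 0xffff)}, one entry per keyspace split (see BloomSplitSeriesKeyspaceBy).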
+func (p *Planner) loadTenantWork( ctx context.Context, tables *dayRangeIterator, -) ([]tenantTableRange, error) { - var work []tenantTableRange +) (work, error) { + tenantTableWork := make(map[config.DayTable]map[string][]v1.FingerprintBounds, tables.TotalDays()) for tables.Next() && tables.Err() == nil && ctx.Err() == nil { table := tables.At() @@ -252,7 +469,12 @@ func (p *Planner) loadWork( if err != nil { return nil, fmt.Errorf("error loading tenants: %w", err) } - level.Debug(p.logger).Log("msg", "loaded tenants", "table", table, "tenants", tenants.Len()) + level.Debug(p.logger).Log("msg", "loaded tenants", "table", table, "tenants", tenants.Remaining()) + + // If this is the first time we see this table, initialize the map + if tenantTableWork[table] == nil { + tenantTableWork[table] = make(map[string][]v1.FingerprintBounds, tenants.Remaining()) + } for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil { p.metrics.tenantsDiscovered.Inc() @@ -265,13 +487,7 @@ func (p *Planner) loadWork( splitFactor := p.limits.BloomSplitSeriesKeyspaceBy(tenant) bounds := SplitFingerprintKeyspaceByFactor(splitFactor) - for _, bounds := range bounds { - work = append(work, tenantTableRange{ - tenant: tenant, - table: table, - ownershipRange: bounds, - }) - } + tenantTableWork[table][tenant] = bounds level.Debug(p.logger).Log("msg", "loading work for tenant", "table", table, "tenant", tenant, "splitFactor", splitFactor) } @@ -286,7 +502,7 @@ func (p *Planner) loadWork( return nil, fmt.Errorf("error iterating tables: %w", err) } - return work, ctx.Err() + return tenantTableWork, ctx.Err() } func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*v1.SliceIter[string], error) { @@ -298,47 +514,6 @@ func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*v1.Slice return v1.NewSliceIter(tenants), nil } -/* -Planning works as follows, split across many functions for clarity: - 1. Fetch all meta.jsons for the given tenant and table which overlap the ownership range of this compactor. - 2. Load current TSDBs for this tenant/table. - 3. For each live TSDB (there should be only 1, but this works with multiple), find any gaps - (fingerprint ranges) which are not up-to-date, determined by checking other meta.json files and comparing - the TSDBs they were generated from as well as their ownership ranges. 
-*/ -func (p *Planner) findGapsForBounds( - ctx context.Context, - tenant string, - table config.DayTable, - ownershipRange v1.FingerprintBounds, -) ([]blockPlan, error) { - logger := log.With(p.logger, "org_id", tenant, "table", table.Addr(), "ownership", ownershipRange.String()) - - // Fetch source metas to be used in both build and cleanup of out-of-date metas+blooms - metas, err := p.bloomStore.FetchMetas( - ctx, - bloomshipper.MetaSearchParams{ - TenantID: tenant, - Interval: bloomshipper.NewInterval(table.Bounds()), - Keyspace: ownershipRange, - }, - ) - if err != nil { - level.Error(logger).Log("msg", "failed to get metas", "err", err) - return nil, fmt.Errorf("failed to get metas: %w", err) - } - - level.Debug(logger).Log("msg", "found relevant metas", "metas", len(metas)) - - // Find gaps in the TSDBs for this tenant/table - gaps, err := p.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger) - if err != nil { - return nil, fmt.Errorf("failed to find outdated gaps: %w", err) - } - - return gaps, nil -} - // blockPlan is a plan for all the work needed to build a meta.json // It includes: // - the tsdb (source of truth) which contains all the series+chunks @@ -507,11 +682,11 @@ func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan return plans, nil } -func (p *Planner) addPendingTask(task *Task) { +func (p *Planner) addPendingTask(task *QueueTask) { p.pendingTasks.Store(task.ID, task) } -func (p *Planner) removePendingTask(task *Task) { +func (p *Planner) removePendingTask(task *QueueTask) { p.pendingTasks.Delete(task.ID) } @@ -523,7 +698,7 @@ func (p *Planner) totalPendingTasks() (total int) { return total } -func (p *Planner) enqueueTask(task *Task) error { +func (p *Planner) enqueueTask(task *QueueTask) error { p.activeUsers.UpdateUserTimestamp(task.Tenant, time.Now()) return p.tasksQueue.Enqueue(task.Tenant, nil, task, func() { task.timesEnqueued++ @@ -570,7 +745,8 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer return fmt.Errorf("dequeue() call resulted in nil response. builder: %s", builderID) } - task := item.(*Task) + task := item.(*QueueTask) + logger := log.With(logger, "task", task.ID) queueTime := time.Since(task.queueTime) p.metrics.queueDuration.Observe(queueTime.Seconds()) @@ -582,7 +758,8 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer continue } - if err := p.forwardTaskToBuilder(builder, builderID, task); err != nil { + result, err := p.forwardTaskToBuilder(builder, builderID, task) + if err != nil { maxRetries := p.limits.BloomTaskMaxRetries(task.Tenant) if maxRetries > 0 && task.timesEnqueued >= maxRetries { p.metrics.tasksFailed.Inc() @@ -593,6 +770,10 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer "maxRetries", maxRetries, "err", err, ) + task.resultsChannel <- &protos.TaskResult{ + TaskID: task.ID, + Error: fmt.Errorf("task failed after max retries (%d): %w", maxRetries, err), + } continue } @@ -601,13 +782,31 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer p.metrics.taskLost.Inc() p.removePendingTask(task) level.Error(logger).Log("msg", "error re-enqueuing task. 
this task will be lost", "err", err) + task.resultsChannel <- &protos.TaskResult{ + TaskID: task.ID, + Error: fmt.Errorf("error re-enqueuing task: %w", err), + } continue } p.metrics.tasksRequeued.Inc() - level.Error(logger).Log("msg", "error forwarding task to builder, Task requeued", "err", err) + level.Error(logger).Log( + "msg", "error forwarding task to builder, Task requeued", + "retries", task.timesEnqueued, + "err", err, + ) + continue } + level.Debug(logger).Log( + "msg", "task completed", + "duration", time.Since(task.queueTime).Seconds(), + "retries", task.timesEnqueued, + ) + p.removePendingTask(task) + + // Send the result back to the task. The channel is buffered, so this should not block. + task.resultsChannel <- result } return errPlannerIsNotRunning @@ -616,16 +815,14 @@ func (p *Planner) BuilderLoop(builder protos.PlannerForBuilder_BuilderLoopServer func (p *Planner) forwardTaskToBuilder( builder protos.PlannerForBuilder_BuilderLoopServer, builderID string, - task *Task, -) error { - defer p.removePendingTask(task) - + task *QueueTask, +) (*protos.TaskResult, error) { msg := &protos.PlannerToBuilder{ Task: task.ToProtoTask(), } if err := builder.Send(msg); err != nil { - return fmt.Errorf("error sending task to builder (%s): %w", builderID, err) + return nil, fmt.Errorf("error sending task to builder (%s): %w", builderID, err) } // Launch a goroutine to wait for the response from the builder so we can @@ -651,12 +848,14 @@ func (p *Planner) forwardTaskToBuilder( select { case result := <-resultsCh: - // TODO: Return metas forward via channel - return result.Error + // Note: Errors from the result are not returned here since we don't retry tasks + // that return with an error. I.e. we won't retry errors forwarded from the builder. + // TODO(salvacorts): Filter and return errors that can be retried. 
+ return result, nil case err := <-errCh: - return err + return nil, err case <-timeout: - return fmt.Errorf("timeout waiting for response from builder (%s)", builderID) + return nil, fmt.Errorf("timeout waiting for response from builder (%s)", builderID) } } @@ -666,7 +865,7 @@ func (p *Planner) forwardTaskToBuilder( func (p *Planner) receiveResultFromBuilder( builder protos.PlannerForBuilder_BuilderLoopServer, builderID string, - task *Task, + task *QueueTask, ) (*protos.TaskResult, error) { // If connection is closed, Recv() will return an error res, err := builder.Recv() diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go index b46b987de751c..c76ef0e4d2679 100644 --- a/pkg/bloombuild/planner/planner_test.go +++ b/pkg/bloombuild/planner/planner_test.go @@ -3,12 +3,16 @@ package planner import ( "context" "fmt" + "io" + "math" + "sync" "testing" "time" "github.com/go-kit/log" "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/services" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -17,6 +21,7 @@ import ( "github.com/grafana/loki/v3/pkg/bloombuild/protos" "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/chunk/cache" "github.com/grafana/loki/v3/pkg/storage/chunk/client/local" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" @@ -25,6 +30,9 @@ import ( "github.com/grafana/loki/v3/pkg/storage/types" ) +var testDay = parseDayTime("2023-09-01") +var testTable = config.NewDayTable(testDay, "index_") + func tsdbID(n int) tsdb.SingleTenantTSDBIdentifier { return tsdb.SingleTenantTSDBIdentifier{ TS: time.Unix(int64(n), 0), @@ -35,7 +43,9 @@ func genMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.Bl m := bloomshipper.Meta{ MetaRef: bloomshipper.MetaRef{ Ref: bloomshipper.Ref{ - Bounds: v1.NewBounds(min, max), + TenantID: "fakeTenant", + TableName: testTable.Addr(), + Bounds: v1.NewBounds(min, max), }, }, Blocks: blocks, @@ -141,14 +151,26 @@ func Test_gapsBetweenTSDBsAndMetas(t *testing.T) { } func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef { - bounds := v1.NewBounds(min, max) + startTS, endTS := testDay.Bounds() return bloomshipper.BlockRef{ Ref: bloomshipper.Ref{ - Bounds: bounds, + TenantID: "fakeTenant", + TableName: testTable.Addr(), + Bounds: v1.NewBounds(min, max), + StartTimestamp: startTS, + EndTimestamp: endTS, + Checksum: 0, }, } } +func genBlock(ref bloomshipper.BlockRef) bloomshipper.Block { + return bloomshipper.Block{ + BlockRef: ref, + Data: &DummyReadSeekCloser{}, + } +} + func Test_blockPlansForGaps(t *testing.T) { for _, tc := range []struct { desc string @@ -333,13 +355,14 @@ func Test_blockPlansForGaps(t *testing.T) { } } -func createTasks(n int) []*Task { - tasks := make([]*Task, 0, n) +func createTasks(n int, resultsCh chan *protos.TaskResult) []*QueueTask { + tasks := make([]*QueueTask, 0, n) // Enqueue tasks for i := 0; i < n; i++ { - task := NewTask( + task := NewQueueTask( context.Background(), time.Now(), - protos.NewTask(config.NewDayTable(config.NewDayTime(0), "fake"), "fakeTenant", v1.NewBounds(0, 10), tsdbID(1), nil), + protos.NewTask(config.NewDayTable(testDay, "fake"), "fakeTenant", v1.NewBounds(0, 10), tsdbID(1), nil), + resultsCh, ) tasks = append(tasks, task) } @@ -385,7 +408,12 @@ func createPlanner( } reg := 
prometheus.NewPedanticRegistry() - planner, err := New(cfg, limits, schemaCfg, storageCfg, storage.ClientMetrics{}, nil, logger, reg) + metasCache := cache.NewNoopCache() + blocksCache := bloomshipper.NewFsBlocksCache(storageCfg.BloomShipperConfig.BlocksCache, reg, logger) + bloomStore, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, storage.ClientMetrics{}, metasCache, blocksCache, reg, logger) + require.NoError(t, err) + + planner, err := New(cfg, limits, schemaCfg, storageCfg, storage.ClientMetrics{}, bloomStore, logger, reg) require.NoError(t, err) return planner @@ -432,9 +460,8 @@ func Test_BuilderLoop(t *testing.T) { modifyBuilder: func(builder *fakeBuilder) { builder.SetReturnErrorMsg(true) }, - resetBuilder: func(builder *fakeBuilder) { - builder.SetReturnErrorMsg(false) - }, + // We don't retry on error messages from the builder + shouldConsumeAfterModify: true, }, { name: "exceed max retries", @@ -487,7 +514,8 @@ func Test_BuilderLoop(t *testing.T) { }) // Enqueue tasks - tasks := createTasks(nTasks) + resultsCh := make(chan *protos.TaskResult, nTasks) + tasks := createTasks(nTasks, resultsCh) for _, task := range tasks { err = planner.enqueueTask(task) require.NoError(t, err) @@ -517,6 +545,11 @@ func Test_BuilderLoop(t *testing.T) { // Finally, the queue should be empty require.Equal(t, 0, planner.totalPendingTasks()) + // consume all tasks result to free up the channel for the next round of tasks + for i := 0; i < nTasks; i++ { + <-resultsCh + } + if tc.modifyBuilder != nil { // Configure builders to return errors for _, builder := range builders { @@ -568,6 +601,213 @@ func Test_BuilderLoop(t *testing.T) { } } +func putMetas(bloomClient bloomshipper.Client, metas []bloomshipper.Meta) error { + for _, meta := range metas { + err := bloomClient.PutMeta(context.Background(), meta) + if err != nil { + return err + } + + for _, block := range meta.Blocks { + err := bloomClient.PutBlock(context.Background(), genBlock(block)) + if err != nil { + return err + } + } + } + return nil +} + +func Test_processTenantTaskResults(t *testing.T) { + for _, tc := range []struct { + name string + + originalMetas []bloomshipper.Meta + taskResults []*protos.TaskResult + expectedMetas []bloomshipper.Meta + }{ + { + name: "errors", + originalMetas: []bloomshipper.Meta{ + genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}), + }, + taskResults: []*protos.TaskResult{ + { + TaskID: "1", + Error: errors.New("fake error"), + }, + { + TaskID: "2", + Error: errors.New("fake error"), + }, + }, + expectedMetas: []bloomshipper.Meta{ + // The original metas should remain unchanged + genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}), + }, + }, + { + name: "no new metas", + originalMetas: []bloomshipper.Meta{ + genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}), + }, + taskResults: []*protos.TaskResult{ + { + TaskID: "1", + }, + { + TaskID: "2", + }, + }, + expectedMetas: []bloomshipper.Meta{ + // The original metas should remain unchanged + genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}), + }, + }, + { + name: "no original metas", + taskResults: []*protos.TaskResult{ + { + TaskID: "1", + CreatedMetas: []bloomshipper.Meta{ 
+ genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + }, + }, + { + TaskID: "2", + CreatedMetas: []bloomshipper.Meta{ + genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}), + }, + }, + }, + expectedMetas: []bloomshipper.Meta{ + genMeta(0, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + genMeta(10, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(10, 20)}), + }, + }, + { + name: "single meta covers all original", + originalMetas: []bloomshipper.Meta{ + genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}), + genMeta(6, 10, []int{0}, []bloomshipper.BlockRef{genBlockRef(6, 10)}), + }, + taskResults: []*protos.TaskResult{ + { + TaskID: "1", + CreatedMetas: []bloomshipper.Meta{ + genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + }, + }, + }, + expectedMetas: []bloomshipper.Meta{ + genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + }, + }, + { + name: "multi version ordering", + originalMetas: []bloomshipper.Meta{ + genMeta(0, 5, []int{0}, []bloomshipper.BlockRef{genBlockRef(0, 5)}), + genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), // only part of the range is outdated, must keep + }, + taskResults: []*protos.TaskResult{ + { + TaskID: "1", + CreatedMetas: []bloomshipper.Meta{ + genMeta(8, 10, []int{2}, []bloomshipper.BlockRef{genBlockRef(8, 10)}), + }, + }, + }, + expectedMetas: []bloomshipper.Meta{ + genMeta(0, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(0, 10)}), + genMeta(8, 10, []int{2}, []bloomshipper.BlockRef{genBlockRef(8, 10)}), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + logger := log.NewNopLogger() + //logger := log.NewLogfmtLogger(os.Stdout) + + cfg := Config{ + PlanningInterval: 1 * time.Hour, + MaxQueuedTasksPerTenant: 10000, + } + planner := createPlanner(t, cfg, &fakeLimits{}, logger) + + bloomClient, err := planner.bloomStore.Client(testDay.ModelTime()) + require.NoError(t, err) + + // Create original metas and blocks + err = putMetas(bloomClient, tc.originalMetas) + require.NoError(t, err) + + ctx, ctxCancel := context.WithCancel(context.Background()) + defer ctxCancel() + resultsCh := make(chan *protos.TaskResult, len(tc.taskResults)) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + err = planner.processTenantTaskResults( + ctx, + testTable, + "fakeTenant", + tc.originalMetas, + len(tc.taskResults), + resultsCh, + ) + require.NoError(t, err) + }() + + for _, taskResult := range tc.taskResults { + if len(taskResult.CreatedMetas) > 0 { + // Emulate builder putting new metas to obj store + err = putMetas(bloomClient, taskResult.CreatedMetas) + require.NoError(t, err) + } + + resultsCh <- taskResult + } + + // Wait for all tasks to be processed and outdated metas/blocks deleted + wg.Wait() + + // Get all metas + metas, err := planner.bloomStore.FetchMetas( + context.Background(), + bloomshipper.MetaSearchParams{ + TenantID: "fakeTenant", + Interval: bloomshipper.NewInterval(testTable.Bounds()), + Keyspace: v1.NewBounds(0, math.MaxUint64), + }, + ) + require.NoError(t, err) + + // TODO(salvacorts): Fix this + // For some reason, when the tests are run in the CI, we do not encode the `loc` of model.Time for each TSDB. + // As a result, when we fetch them, the loc is empty whereas in the original metas, it is not. Therefore the + // comparison fails. 
As a workaround, we manually reset the TS of the sources of the
+			// fetched metas.
+			for i := range metas {
+				for j := range metas[i].Sources {
+					sec := metas[i].Sources[j].TS.Unix()
+					nsec := metas[i].Sources[j].TS.Nanosecond()
+					metas[i].Sources[j].TS = time.Unix(sec, int64(nsec))
+				}
+			}
+
+			// Compare metas
+			require.Equal(t, len(tc.expectedMetas), len(metas))
+			require.ElementsMatch(t, tc.expectedMetas, metas)
+		})
+	}
+}
+
 type fakeBuilder struct {
 	id    string
 	tasks []*protos.Task
@@ -709,3 +949,17 @@ func parseDayTime(s string) config.DayTime {
 		Time: model.TimeFromUnix(t.Unix()),
 	}
 }
+
+type DummyReadSeekCloser struct{}
+
+func (d *DummyReadSeekCloser) Read(_ []byte) (n int, err error) {
+	return 0, io.EOF
+}
+
+func (d *DummyReadSeekCloser) Seek(_ int64, _ int) (int64, error) {
+	return 0, nil
+}
+
+func (d *DummyReadSeekCloser) Close() error {
+	return nil
+}
diff --git a/pkg/bloombuild/planner/task.go b/pkg/bloombuild/planner/task.go
index 1da39cea6bfd7..8580dd12a655f 100644
--- a/pkg/bloombuild/planner/task.go
+++ b/pkg/bloombuild/planner/task.go
@@ -7,19 +7,27 @@ import (
 	"github.com/grafana/loki/v3/pkg/bloombuild/protos"
 )
 
-type Task struct {
+type QueueTask struct {
 	*protos.Task
 
+	resultsChannel chan *protos.TaskResult
+
 	// Tracking
 	timesEnqueued int
 	queueTime     time.Time
 	ctx           context.Context
 }
 
-func NewTask(ctx context.Context, queueTime time.Time, task *protos.Task) *Task {
-	return &Task{
-		Task:      task,
-		ctx:       ctx,
-		queueTime: queueTime,
+func NewQueueTask(
+	ctx context.Context,
+	queueTime time.Time,
+	task *protos.Task,
+	resultsChannel chan *protos.TaskResult,
+) *QueueTask {
+	return &QueueTask{
+		Task:           task,
+		resultsChannel: resultsChannel,
+		ctx:            ctx,
+		queueTime:      queueTime,
 	}
 }
diff --git a/pkg/bloombuild/planner/versioned_range.go b/pkg/bloombuild/planner/versioned_range.go
new file mode 100644
index 0000000000000..578b5d7ef83a6
--- /dev/null
+++ b/pkg/bloombuild/planner/versioned_range.go
@@ -0,0 +1,261 @@
+package planner
+
+import (
+	"sort"
+
+	"github.com/prometheus/common/model"
+
+	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+	"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+)
+
+type tsdbToken struct {
+	through model.Fingerprint // inclusive
+	version int               // TSDB version
+}
+
+// a ring of token ranges used to identify old metas.
+// each token represents that a TSDB version has covered the entire range
+// up to that point from the previous token.
+type tsdbTokenRange []tsdbToken
+
+func (t tsdbTokenRange) Len() int {
+	return len(t)
+}
+
+func (t tsdbTokenRange) Less(i, j int) bool {
+	return t[i].through < t[j].through
+}
+
+func (t tsdbTokenRange) Swap(i, j int) {
+	t[i], t[j] = t[j], t[i]
+}
+
+// Add ensures a versioned set of bounds is added to the range. If the bounds are already
+// covered by a more up to date version, it returns false.
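+//
+// For intuition, starting from an empty range:
+//
+//	Add(1, [0, 10]) => [{through: 10, version: 1}], added=true
+//	Add(2, [5, 10]) => [{through: 4, version: 1}, {through: 10, version: 2}], added=true
+//	Add(0, [0, 10]) => unchanged, added=false (already covered by newer versions)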
+func (t tsdbTokenRange) Add(version int, bounds v1.FingerprintBounds) (res tsdbTokenRange, added bool) {
+	// allows attempting to join neighboring token ranges with identical versions
+	// that aren't known until the end of the function
+	var shouldReassemble bool
+	var reassembleFrom int
+	defer func() {
+		if shouldReassemble {
+			res = res.reassemble(reassembleFrom)
+		}
+	}()
+
+	// special case: first token
+	if len(t) == 0 {
+		tok := tsdbToken{through: bounds.Max, version: version}
+		// special case: first token is included in bounds, no need to fill negative space
+		if bounds.Min == 0 {
+			return append(t, tok), true
+		}
+		// Use a negative version to indicate that the range is not covered by any version.
+		return append(t, tsdbToken{through: bounds.Min - 1, version: -1}, tok), true
+	}
+
+	// For non-empty token ranges, we continually update the range with newer versions.
+	for {
+		// find first token that covers the start of the range
+		i := sort.Search(len(t), func(i int) bool {
+			return t[i].through >= bounds.Min
+		})
+
+		if i == len(t) {
+			tok := tsdbToken{through: bounds.Max, version: version}
+
+			// edge case: there is no gap between the previous token range
+			// and the new one;
+			// skip adding a negative token
+			if t[len(t)-1].through == bounds.Min-1 {
+				return append(t, tok), true
+			}
+
+			// the range is not covered by any version and we are at the end of the range.
+			// Add a negative token and the new token.
+			negative := tsdbToken{through: bounds.Min - 1, version: -1}
+			return append(t, negative, tok), true
+		}
+
+		// Otherwise, we've found a token that covers the start of the range.
+		newer := t[i].version < version
+		preExisting := t.boundsForToken(i)
+		if !newer {
+			if bounds.Within(preExisting) {
+				// The range is already covered by a more up to date version; no need
+				// to add anything, but preserve `added` from any earlier iteration
+				return t, added
+			}
+
+			// The range is partially covered by a more up to date version;
+			// update the range we need to check and continue
+			bounds = v1.NewBounds(preExisting.Max+1, bounds.Max)
+			continue
+		}
+
+		// If we need to update the range, there are 5 cases:
+		// 1. `equal`: the incoming range equals an existing range
+		//      ------ # addition
+		//      ------ # src
+		// 2. `subset`: the incoming range is a subset of an existing range
+		//       ------ # addition
+		//      -------- # src
+		// 3. `overflow_both_sides`: the incoming range is a superset of an existing range. This is not possible
+		//    because the first token in the ring implicitly covers the left bound (zero) of all possible fps.
+		//    Therefore, we can skip this case.
+		//      ------ # addition
+		//       ---- # src
+		// 4. `right_overflow`: the incoming range overflows the right side of an existing range
+		//        ------ # addition
+		//      ------ # src
+		// 5. `left_overflow`: the incoming range overflows the left side of an existing range. This can be skipped
+		//    for the same reason as `overflow_both_sides`.
+		//      ------ # addition
+		//        ------ # src
+
+		// 1) (`equal`): we're replacing the same bounds
+		if bounds.Equal(preExisting) {
+			t[i].version = version
+			return t, true
+		}
+
+		// 2) (`subset`): the incoming range is a subset of an existing range
+		if bounds.Within(preExisting) {
+			// 2a) the incoming range touches the existing range's minimum bound
+			if bounds.Min == preExisting.Min {
+				tok := tsdbToken{through: bounds.Max, version: version}
+				t = append(t, tsdbToken{})
+				copy(t[i+1:], t[i:])
+				t[i] = tok
+				return t, true
+			}
+			// 2b) the incoming range touches the existing range's maximum bound
+			if bounds.Max == preExisting.Max {
+				t[i].through = bounds.Min - 1
+				tok := tsdbToken{through: bounds.Max, version: version}
+				t = append(t, tsdbToken{})
+				copy(t[i+2:], t[i+1:])
+				t[i+1] = tok
+				return t, true
+			}
+
+			// 2c) the incoming range does not touch either edge;
+			// add two tokens (the new one and a new left-bound for the old range)
+			tok := tsdbToken{through: bounds.Max, version: version}
+			t = append(t, tsdbToken{}, tsdbToken{})
+			copy(t[i+2:], t[i:])
+			t[i+1] = tok
+			t[i].through = bounds.Min - 1
+			return t, true
+		}
+
+		// 4) (`right_overflow`): the incoming range overflows the right side of an existing range
+
+		// 4a) shortcut: the incoming range is a right-overlapping superset of the existing range.
+		// replace the existing token's version, update reassembly targets for merging neighboring ranges
+		// w/ the same version, and continue
+		if preExisting.Min == bounds.Min {
+			t[i].version = version
+			bounds.Min = preExisting.Max + 1
+			added = true
+			if !shouldReassemble {
+				reassembleFrom = i
+				shouldReassemble = true
+			}
+			continue
+		}
+
+		// 4b) the incoming range overlaps the right side of the existing range but
+		// does not touch the left side;
+		// add a new token for the right side of the existing range then update the reassembly targets
+		// and continue
+		overlap := tsdbToken{through: t[i].through, version: version}
+		t[i].through = bounds.Min - 1
+		t = append(t, tsdbToken{})
+		copy(t[i+2:], t[i+1:])
+		t[i+1] = overlap
+		added = true
+		bounds.Min = overlap.through + 1
+		if !shouldReassemble {
+			reassembleFrom = i + 1
+			shouldReassemble = true
+		}
+		continue
+	}
+}
+
+func (t tsdbTokenRange) boundsForToken(i int) v1.FingerprintBounds {
+	if i == 0 {
+		return v1.FingerprintBounds{Min: 0, Max: t[i].through}
+	}
+	return v1.FingerprintBounds{Min: t[i-1].through + 1, Max: t[i].through}
+}
+
+// reassemble merges neighboring tokens with the same version
+func (t tsdbTokenRange) reassemble(from int) tsdbTokenRange {
+	reassembleTo := from
+	for i := from; i < len(t)-1; i++ {
+		if t[i].version != t[i+1].version {
+			break
+		}
+		reassembleTo = i + 1
+	}
+
+	if reassembleTo == from {
+		return t
+	}
+	t[from].through = t[reassembleTo].through
+	copy(t[from+1:], t[reassembleTo+1:])
+	return t[:len(t)-(reassembleTo-from)]
+}
+
+func outdatedMetas(metas []bloomshipper.Meta) []bloomshipper.Meta {
+	var outdated []bloomshipper.Meta
+
+	// Sort metas descending by most recent source when checking
+	// for outdated metas (older metas are discarded if they don't change the range).
+	sort.Slice(metas, func(i, j int) bool {
+		a, aExists := metas[i].MostRecentSource()
+		b, bExists := metas[j].MostRecentSource()
+
+		if !aExists && !bExists {
+			// stable sort two sourceless metas by their bounds (easier testing)
+			return metas[i].Bounds.Less(metas[j].Bounds)
+		}
+
+		if !aExists {
+			// If a meta has no sources, it's out of date by definition.
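+			// There is no TSDB version to attribute its coverage to.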
+ // By convention we sort it to the beginning of the list and will mark it for removal later + return true + } + + if !bExists { + // if a exists but b does not, mark b as lesser, sorting b to the + // front + return false + } + return !a.TS.Before(b.TS) + }) + + var ( + tokenRange tsdbTokenRange + added bool + ) + + for _, meta := range metas { + mostRecent, exists := meta.MostRecentSource() + if !exists { + // if the meta exists but does not reference a TSDB, it's out of date + // TODO(owen-d): this shouldn't happen, figure out why + outdated = append(outdated, meta) + } + version := int(model.TimeFromUnixNano(mostRecent.TS.UnixNano())) + tokenRange, added = tokenRange.Add(version, meta.Bounds) + if !added { + outdated = append(outdated, meta) + } + } + + return outdated +} diff --git a/pkg/bloombuild/planner/versioned_range_test.go b/pkg/bloombuild/planner/versioned_range_test.go new file mode 100644 index 0000000000000..e58f143842f1c --- /dev/null +++ b/pkg/bloombuild/planner/versioned_range_test.go @@ -0,0 +1,322 @@ +package planner + +import ( + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" +) + +func Test_TsdbTokenRange(t *testing.T) { + type addition struct { + version int + bounds v1.FingerprintBounds + } + type exp struct { + added bool + err bool + } + mk := func(version int, min, max model.Fingerprint) addition { + return addition{version, v1.FingerprintBounds{Min: min, Max: max}} + } + tok := func(version int, through model.Fingerprint) tsdbToken { + return tsdbToken{version: version, through: through} + } + + for _, tc := range []struct { + desc string + additions []addition + exp []bool + result tsdbTokenRange + }{ + { + desc: "ascending versions", + additions: []addition{ + mk(1, 0, 10), + mk(2, 11, 20), + mk(3, 15, 25), + }, + exp: []bool{true, true, true}, + result: tsdbTokenRange{ + tok(1, 10), + tok(2, 14), + tok(3, 25), + }, + }, + { + desc: "descending versions", + additions: []addition{ + mk(3, 15, 25), + mk(2, 11, 20), + mk(1, 0, 10), + }, + exp: []bool{true, true, true}, + result: tsdbTokenRange{ + tok(1, 10), + tok(2, 14), + tok(3, 25), + }, + }, + { + desc: "simple", + additions: []addition{ + mk(3, 0, 10), + mk(2, 11, 20), + mk(1, 15, 25), + }, + exp: []bool{true, true, true}, + result: tsdbTokenRange{ + tok(3, 10), + tok(2, 20), + tok(1, 25), + }, + }, + { + desc: "simple replacement", + additions: []addition{ + mk(3, 10, 20), + mk(2, 0, 9), + }, + exp: []bool{true, true}, + result: tsdbTokenRange{ + tok(2, 9), + tok(3, 20), + }, + }, + { + desc: "complex", + additions: []addition{ + mk(5, 30, 50), + mk(4, 20, 45), + mk(3, 25, 70), + mk(2, 10, 20), + mk(1, 1, 5), + }, + exp: []bool{true, true, true, true, true, true}, + result: tsdbTokenRange{ + tok(-1, 0), + tok(1, 5), + tok(-1, 9), + tok(2, 19), + tok(4, 29), + tok(5, 50), + tok(3, 70), + }, + }, + { + desc: "neighboring upper range", + additions: []addition{ + mk(5, 30, 50), + mk(4, 51, 60), + }, + exp: []bool{true, true}, + result: tsdbTokenRange{ + tok(-1, 29), + tok(5, 50), + tok(4, 60), + }, + }, + { + desc: "non-neighboring upper range", + additions: []addition{ + mk(5, 30, 50), + mk(4, 55, 60), + }, + exp: []bool{true, true}, + result: tsdbTokenRange{ + tok(-1, 29), + tok(5, 50), + tok(-1, 54), + tok(4, 60), + }, + }, + { + desc: "earlier version within", + 
additions: []addition{ + mk(5, 30, 50), + mk(4, 40, 45), + }, + exp: []bool{true, false}, + result: tsdbTokenRange{ + tok(-1, 29), + tok(5, 50), + }, + }, + { + desc: "earlier version right overlapping", + additions: []addition{ + mk(5, 10, 20), + mk(4, 15, 25), + }, + exp: []bool{true, true}, + result: tsdbTokenRange{ + tok(-1, 9), + tok(5, 20), + tok(4, 25), + }, + }, + { + desc: "older version overlaps two", + additions: []addition{ + mk(3, 10, 20), + mk(2, 21, 30), + mk(1, 15, 25), + }, + exp: []bool{true, true, false}, + result: tsdbTokenRange{ + tok(-1, 9), + tok(3, 20), + tok(2, 30), + }, + }, + { + desc: "older version overlaps two w middle", + additions: []addition{ + mk(3, 10, 20), + mk(2, 22, 30), + mk(1, 15, 25), + }, + exp: []bool{true, true, true}, + result: tsdbTokenRange{ + tok(-1, 9), + tok(3, 20), + tok(1, 21), + tok(2, 30), + }, + }, + { + desc: "newer right overflow", + additions: []addition{ + mk(1, 30, 50), + mk(2, 40, 60), + }, + exp: []bool{true, true}, + result: tsdbTokenRange{ + tok(-1, 29), + tok(1, 39), + tok(2, 60), + }, + }, + { + desc: "newer right overflow superset", + additions: []addition{ + mk(1, 30, 50), + mk(2, 30, 60), + }, + exp: []bool{true, true}, + result: tsdbTokenRange{ + tok(-1, 29), + tok(2, 60), + }, + }, + { + desc: "newer right overflow partial", + additions: []addition{ + mk(1, 30, 50), + mk(2, 40, 60), + }, + exp: []bool{true, true}, + result: tsdbTokenRange{ + tok(-1, 29), + tok(1, 39), + tok(2, 60), + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + var ( + tr tsdbTokenRange + added bool + ) + for i, a := range tc.additions { + tr, added = tr.Add(a.version, a.bounds) + exp := tc.exp[i] + require.Equal(t, exp, added, "on iteration %d", i) + } + require.Equal(t, tc.result, tr) + }) + } +} + +func Test_OutdatedMetas(t *testing.T) { + gen := func(bounds v1.FingerprintBounds, tsdbTimes ...model.Time) (meta bloomshipper.Meta) { + for _, tsdbTime := range tsdbTimes { + meta.Sources = append(meta.Sources, tsdb.SingleTenantTSDBIdentifier{TS: tsdbTime.Time()}) + } + meta.Bounds = bounds + return meta + } + + for _, tc := range []struct { + desc string + metas []bloomshipper.Meta + exp []bloomshipper.Meta + }{ + { + desc: "no metas", + metas: nil, + exp: nil, + }, + { + desc: "single meta", + metas: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 10), 0), + }, + exp: nil, + }, + { + desc: "single outdated meta", + metas: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 10), 0), + gen(v1.NewBounds(0, 10), 1), + }, + exp: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 10), 0), + }, + }, + { + desc: "single outdated via partitions", + metas: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 5), 0), + gen(v1.NewBounds(6, 10), 0), + gen(v1.NewBounds(0, 10), 1), + }, + exp: []bloomshipper.Meta{ + gen(v1.NewBounds(6, 10), 0), + gen(v1.NewBounds(0, 5), 0), + }, + }, + { + desc: "same tsdb versions", + metas: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 5), 0), + gen(v1.NewBounds(6, 10), 0), + gen(v1.NewBounds(0, 10), 1), + }, + exp: []bloomshipper.Meta{ + gen(v1.NewBounds(6, 10), 0), + gen(v1.NewBounds(0, 5), 0), + }, + }, + { + desc: "multi version ordering", + metas: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 5), 0), + gen(v1.NewBounds(0, 10), 1), // only part of the range is outdated, must keep + gen(v1.NewBounds(8, 10), 2), + }, + exp: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 5), 0), + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + outdated := outdatedMetas(tc.metas) + require.Equal(t, tc.exp, outdated) + }) + } +} diff --git a/pkg/bloomcompactor/batch.go 
b/pkg/bloomcompactor/batch.go index 4247fc1e4b52c..4525bca006a07 100644 --- a/pkg/bloomcompactor/batch.go +++ b/pkg/bloomcompactor/batch.go @@ -168,9 +168,9 @@ func newBatchedBlockLoader( } // compiler checks -var _ v1.Iterator[*v1.SeriesWithBloom] = &blockLoadingIter{} -var _ v1.CloseableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{} -var _ v1.ResettableIterator[*v1.SeriesWithBloom] = &blockLoadingIter{} +var _ v1.Iterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ v1.CloseableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} +var _ v1.ResettableIterator[*v1.SeriesWithBlooms] = &blockLoadingIter{} // TODO(chaudum): testware func newBlockLoadingIter(ctx context.Context, blocks []bloomshipper.BlockRef, fetcher FetchFunc[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier], batchSize int) *blockLoadingIter { @@ -196,13 +196,13 @@ type blockLoadingIter struct { // internals initialized bool err error - iter v1.Iterator[*v1.SeriesWithBloom] + iter v1.Iterator[*v1.SeriesWithBlooms] loader *batchedLoader[bloomshipper.BlockRef, *bloomshipper.CloseableBlockQuerier, *bloomshipper.CloseableBlockQuerier] loaded map[io.Closer]struct{} } // At implements v1.Iterator. -func (i *blockLoadingIter) At() *v1.SeriesWithBloom { +func (i *blockLoadingIter) At() *v1.SeriesWithBlooms { if !i.initialized { panic("iterator not initialized") } @@ -229,7 +229,7 @@ func (i *blockLoadingIter) init() { i.overlapping = overlappingBlocksIter(i.inputs) // set initial iter - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() // set "match all" filter function if not present if i.filter == nil { @@ -249,14 +249,14 @@ func (i *blockLoadingIter) loadNext() bool { loader := newBatchedBlockLoader(i.ctx, i.fetcher, blockRefs, i.batchSize) filtered := v1.NewFilterIter[*bloomshipper.CloseableBlockQuerier](loader, i.filter) - iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs)) + iters := make([]v1.PeekingIterator[*v1.SeriesWithBlooms], 0, len(blockRefs)) for filtered.Next() { bq := filtered.At() i.loaded[bq] = struct{}{} iter, err := bq.SeriesIter() if err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() return false } iters = append(iters, iter) @@ -264,7 +264,7 @@ func (i *blockLoadingIter) loadNext() bool { if err := filtered.Err(); err != nil { i.err = err - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() return false } @@ -278,12 +278,12 @@ func (i *blockLoadingIter) loadNext() bool { // two overlapping blocks can conceivably have the same series, so we need to dedupe, // preferring the one with the most chunks already indexed since we'll have // to add fewer chunks to the bloom - i.iter = v1.NewDedupingIter[*v1.SeriesWithBloom, *v1.SeriesWithBloom]( - func(a, b *v1.SeriesWithBloom) bool { + i.iter = v1.NewDedupingIter[*v1.SeriesWithBlooms, *v1.SeriesWithBlooms]( + func(a, b *v1.SeriesWithBlooms) bool { return a.Series.Fingerprint == b.Series.Fingerprint }, - v1.Identity[*v1.SeriesWithBloom], - func(a, b *v1.SeriesWithBloom) *v1.SeriesWithBloom { + v1.Identity[*v1.SeriesWithBlooms], + func(a, b *v1.SeriesWithBlooms) *v1.SeriesWithBlooms { if len(a.Series.Chunks) > len(b.Series.Chunks) { return a } @@ -294,7 +294,7 @@ func (i *blockLoadingIter) loadNext() bool { return i.iter.Next() } - i.iter = v1.NewEmptyIter[*v1.SeriesWithBloom]() + i.iter = v1.NewEmptyIter[*v1.SeriesWithBlooms]() i.err = i.overlapping.Err() 
return false } diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index b46ec1cba7c87..acfb5ba01f355 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -303,7 +303,7 @@ func (c *Compactor) loadWork( if err != nil { return errors.Wrap(err, "getting tenants") } - nTenants := tenants.Len() + nTenants := tenants.Remaining() type ownedTenant struct { tenant string diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go index f9defdc1fdfbc..277d040d688b9 100644 --- a/pkg/bloomcompactor/controller.go +++ b/pkg/bloomcompactor/controller.go @@ -287,7 +287,7 @@ func (s *SimpleBloomController) loadWorkForGap( tenant string, id tsdb.Identifier, gap gapWithBlocks, -) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBloom], error) { +) (v1.Iterator[*v1.Series], v1.CloseableResettableIterator[*v1.SeriesWithBlooms], error) { // load a series iterator for the gap seriesItr, err := s.tsdbStore.LoadTSDB(ctx, table, tenant, id, gap.bounds) if err != nil { diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go index 229efe9c16935..2cb16eac02eae 100644 --- a/pkg/bloomcompactor/spec.go +++ b/pkg/bloomcompactor/spec.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -45,7 +44,7 @@ type SimpleBloomGenerator struct { userID string store v1.Iterator[*v1.Series] chunkLoader ChunkLoader - blocksIter v1.ResettableIterator[*v1.SeriesWithBloom] + blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms] // options to build blocks with opts v1.BlockOptions @@ -68,7 +67,7 @@ func NewSimpleBloomGenerator( opts v1.BlockOptions, store v1.Iterator[*v1.Series], chunkLoader ChunkLoader, - blocksIter v1.ResettableIterator[*v1.SeriesWithBloom], + blocksIter v1.ResettableIterator[*v1.SeriesWithBlooms], readWriterFn func() (v1.BlockWriter, v1.BlockReader), reporter func(model.Fingerprint), metrics *Metrics, @@ -98,44 +97,30 @@ func NewSimpleBloomGenerator( } } -func (s *SimpleBloomGenerator) populator(ctx context.Context) func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) { - return func(series *v1.Series, bloom *v1.Bloom) (int, bool, error) { - start := time.Now() +func (s *SimpleBloomGenerator) populator(ctx context.Context) v1.BloomPopulatorFunc { + return func( + series *v1.Series, + srcBlooms v1.SizedIterator[*v1.Bloom], + toAdd v1.ChunkRefs, + ch chan *v1.BloomCreation, + ) { level.Debug(s.logger).Log( "msg", "populating bloom filter", "stage", "before", "fp", series.Fingerprint, "chunks", len(series.Chunks), ) - chunkItersWithFP, err := s.chunkLoader.Load(ctx, s.userID, series) - if err != nil { - return 0, false, errors.Wrapf(err, "failed to load chunks for series: %+v", series) - } - - bytesAdded, skip, err := s.tokenizer.Populate( - &v1.SeriesWithBloom{ - Series: series, - Bloom: bloom, - }, - chunkItersWithFP.itr, - ) + chunkItersWithFP := s.chunkLoader.Load(ctx, s.userID, &v1.Series{ + Fingerprint: series.Fingerprint, + Chunks: toAdd, + }) - level.Debug(s.logger).Log( - "msg", "populating bloom filter", - "stage", "after", - "fp", series.Fingerprint, - "chunks", len(series.Chunks), - "series_bytes", bytesAdded, - "duration", time.Since(start), - "err", err, - ) + s.tokenizer.Populate(srcBlooms, chunkItersWithFP.itr, ch) if s.reporter != nil { s.reporter(series.Fingerprint) } - return bytesAdded, skip, err } - } func (s *SimpleBloomGenerator) Generate(ctx context.Context) *LazyBlockBuilderIterator { @@ -179,10 
+164,10 @@ type LazyBlockBuilderIterator struct { ctx context.Context opts v1.BlockOptions metrics *Metrics - populate func(*v1.Series, *v1.Bloom) (int, bool, error) + populate v1.BloomPopulatorFunc readWriterFn func() (v1.BlockWriter, v1.BlockReader) series v1.PeekingIterator[*v1.Series] - blocks v1.ResettableIterator[*v1.SeriesWithBloom] + blocks v1.ResettableIterator[*v1.SeriesWithBlooms] bytesAdded int curr *v1.Block @@ -193,10 +178,10 @@ func NewLazyBlockBuilderIterator( ctx context.Context, opts v1.BlockOptions, metrics *Metrics, - populate func(*v1.Series, *v1.Bloom) (int, bool, error), + populate v1.BloomPopulatorFunc, readWriterFn func() (v1.BlockWriter, v1.BlockReader), series v1.PeekingIterator[*v1.Series], - blocks v1.ResettableIterator[*v1.SeriesWithBloom], + blocks v1.ResettableIterator[*v1.SeriesWithBlooms], ) *LazyBlockBuilderIterator { return &LazyBlockBuilderIterator{ ctx: ctx, @@ -270,7 +255,7 @@ type ChunkItersByFingerprint struct { // ChunkLoader loads chunks from a store type ChunkLoader interface { - Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error) + Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint } // StoreChunkLoader loads chunks from a store @@ -286,7 +271,7 @@ func NewStoreChunkLoader(fetcherProvider stores.ChunkFetcherProvider, metrics *M } } -func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) (*ChunkItersByFingerprint, error) { +func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.Series) *ChunkItersByFingerprint { // NB(owen-d): This is probably unnecessary as we should only have one fetcher // because we'll only be working on a single index period at a time, but this should protect // us in the case of refactoring/changing this and likely isn't a perf bottleneck. @@ -317,5 +302,5 @@ func (s *StoreChunkLoader) Load(ctx context.Context, userID string, series *v1.S return &ChunkItersByFingerprint{ fp: series.Fingerprint, itr: newBatchedChunkLoader(ctx, fetchers, inputs, s.metrics, batchedLoaderDefaultBatchSize), - }, nil + } } diff --git a/pkg/bloomcompactor/spec_test.go b/pkg/bloomcompactor/spec_test.go index 7e39b8dec57f0..f887d32053226 100644 --- a/pkg/bloomcompactor/spec_test.go +++ b/pkg/bloomcompactor/spec_test.go @@ -15,19 +15,19 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" ) -func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) { +func blocksFromSchema(t *testing.T, n int, options v1.BlockOptions) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) { return blocksFromSchemaWithRange(t, n, options, 0, 0xffff) } // splits 100 series across `n` non-overlapping blocks. // uses options to build blocks with. 
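+// It panics unless n evenly divides the 100 generated series.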
-func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBloom, refs []bloomshipper.BlockRef) { +func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fromFP, throughFp model.Fingerprint) (res []*v1.Block, data []v1.SeriesWithBlooms, refs []bloomshipper.BlockRef) { if 100%n != 0 { panic("100 series must be evenly divisible by n") } numSeries := 100 - data, _ = v1.MkBasicSeriesWithBlooms(numSeries, 0, fromFP, throughFp, 0, 10000) + data, _ = v1.MkBasicSeriesWithBlooms(numSeries, fromFP, throughFp, 0, 10000) seriesPerBlock := numSeries / n @@ -46,7 +46,7 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro minIdx, maxIdx := i*seriesPerBlock, (i+1)*seriesPerBlock - itr := v1.NewSliceIter[v1.SeriesWithBloom](data[minIdx:maxIdx]) + itr := v1.NewSliceIter[v1.SeriesWithBlooms](data[minIdx:maxIdx]) _, err = builder.BuildFrom(itr) require.Nil(t, err) @@ -62,11 +62,11 @@ func blocksFromSchemaWithRange(t *testing.T, n int, options v1.BlockOptions, fro // doesn't actually load any chunks type dummyChunkLoader struct{} -func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) (*ChunkItersByFingerprint, error) { +func (dummyChunkLoader) Load(_ context.Context, _ string, series *v1.Series) *ChunkItersByFingerprint { return &ChunkItersByFingerprint{ fp: series.Fingerprint, itr: v1.NewEmptyIter[v1.ChunkRefWithIter](), - }, nil + } } func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block, refs []bloomshipper.BlockRef) *SimpleBloomGenerator { @@ -132,9 +132,9 @@ func TestSimpleBloomGenerator(t *testing.T) { } { t.Run(fmt.Sprintf("%s/%s", tc.desc, enc), func(t *testing.T) { sourceBlocks, data, refs := blocksFromSchemaWithRange(t, 2, tc.fromSchema, 0x00000, 0x6ffff) - storeItr := v1.NewMapIter[v1.SeriesWithBloom, *v1.Series]( - v1.NewSliceIter[v1.SeriesWithBloom](data), - func(swb v1.SeriesWithBloom) *v1.Series { + storeItr := v1.NewMapIter[v1.SeriesWithBlooms, *v1.Series]( + v1.NewSliceIter[v1.SeriesWithBlooms](data), + func(swb v1.SeriesWithBlooms) *v1.Series { return swb.Series }, ) @@ -150,9 +150,9 @@ func TestSimpleBloomGenerator(t *testing.T) { // Check all the input series are present in the output blocks. expectedRefs := v1.PointerSlice(data) - outputRefs := make([]*v1.SeriesWithBloom, 0, len(data)) + outputRefs := make([]*v1.SeriesWithBlooms, 0, len(data)) for _, block := range outputBlocks { - bq := v1.NewBlockQuerier(block, false, v1.DefaultMaxPageSize) + bq := v1.NewBlockQuerier(block, false, v1.DefaultMaxPageSize).Iter() for bq.Next() { outputRefs = append(outputRefs, bq.At()) } diff --git a/pkg/bloomcompactor/versioned_range.go b/pkg/bloomcompactor/versioned_range.go index 03da12f1d7da5..8af56a0754cc3 100644 --- a/pkg/bloomcompactor/versioned_range.go +++ b/pkg/bloomcompactor/versioned_range.go @@ -214,13 +214,24 @@ func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta, err // Sort metas descending by most recent source when checking // for outdated metas (older metas are discarded if they don't change the range). 
sort.Slice(metas, func(i, j int) bool { - a, err := metas[i].MostRecentSource() - if err != nil { - panic(err.Error()) + a, aExists := metas[i].MostRecentSource() + b, bExists := metas[j].MostRecentSource() + + if !aExists && !bExists { + // stable sort two sourceless metas by their bounds (easier testing) + return metas[i].Bounds.Less(metas[j].Bounds) } - b, err := metas[j].MostRecentSource() - if err != nil { - panic(err.Error()) + + if !aExists { + // If a meta has no sources, it's out of date by definition. + // By convention we sort it to the beginning of the list and will mark it for removal later + return true + } + + if !bExists { + // if a exists but b does not, mark b as lesser, sorting b to the + // front + return false } return !a.TS.Before(b.TS) }) @@ -231,9 +242,11 @@ func outdatedMetas(metas []bloomshipper.Meta) (outdated []bloomshipper.Meta, err ) for _, meta := range metas { - mostRecent, err := meta.MostRecentSource() - if err != nil { - return nil, err + mostRecent, exists := meta.MostRecentSource() + if !exists { + // if the meta exists but does not reference a TSDB, it's out of date + // TODO(owen-d): this shouldn't happen, figure out why + outdated = append(outdated, meta) } version := int(model.TimeFromUnixNano(mostRecent.TS.UnixNano())) tokenRange, added = tokenRange.Add(version, meta.Bounds) diff --git a/pkg/bloomcompactor/versioned_range_test.go b/pkg/bloomcompactor/versioned_range_test.go index a85418bc6e1e5..67db348036ffa 100644 --- a/pkg/bloomcompactor/versioned_range_test.go +++ b/pkg/bloomcompactor/versioned_range_test.go @@ -313,6 +313,35 @@ func Test_OutdatedMetas(t *testing.T) { gen(v1.NewBounds(0, 5), 0), }, }, + { + desc: "metas without sources are removed", + metas: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 5), 0), + gen(v1.NewBounds(6, 10), 0), + gen(v1.NewBounds(0, 10), 1), + gen(v1.NewBounds(11, 15)), // Meta without sources + }, + exp: []bloomshipper.Meta{ + gen(v1.NewBounds(11, 15)), // Meta without sources + gen(v1.NewBounds(6, 10), 0), + gen(v1.NewBounds(0, 5), 0), + }, + }, + { + desc: "metas without sources are interleaved", + metas: []bloomshipper.Meta{ + gen(v1.NewBounds(0, 5), 0), + gen(v1.NewBounds(6, 10)), // Meta without sources + gen(v1.NewBounds(0, 10), 1), + gen(v1.NewBounds(11, 15)), // Meta without sources + gen(v1.NewBounds(16, 20), 2), + }, + exp: []bloomshipper.Meta{ + gen(v1.NewBounds(6, 10)), // Meta without sources + gen(v1.NewBounds(11, 15)), // Meta without sources + gen(v1.NewBounds(0, 5), 0), + }, + }, } { t.Run(tc.desc, func(t *testing.T) { outdated, err := outdatedMetas(tc.metas) diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go index 15c9ca2be2d85..fdcd7df117f3f 100644 --- a/pkg/bloomgateway/bloomgateway_test.go +++ b/pkg/bloomgateway/bloomgateway_test.go @@ -325,7 +325,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { { Fingerprint: uint64(1000 + 100*idx), UserID: tenantID, - From: now.Add(-24 * time.Hour), + From: now.Add(-4 * time.Hour), Through: now, Checksum: uint32(idx), }, @@ -335,7 +335,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { TenantID: tenantID, TableName: "table_1", Bounds: v1.NewBounds(0, 10000), - StartTimestamp: now.Add(-24 * time.Hour), + StartTimestamp: now.Add(-4 * time.Hour), EndTimestamp: now, Checksum: uint32(idx), }, @@ -343,7 +343,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) { expr, err := syntax.ParseExpr(`{foo="bar"} |= "foo"`) require.NoError(t, err) req := &logproto.FilterChunkRefRequest{ - From: now.Add(-24 * 
time.Hour), + From: now.Add(-4 * time.Hour), Through: now, Refs: groupRefs(t, chunkRefs), Plan: plan.QueryPlan{AST: expr}, diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go index c92d6fad30f73..23de7a15e2be7 100644 --- a/pkg/bloomgateway/querier.go +++ b/pkg/bloomgateway/querier.go @@ -23,6 +23,7 @@ import ( type querierMetrics struct { chunksTotal prometheus.Counter chunksFiltered prometheus.Counter + chunksSkipped prometheus.Counter seriesTotal prometheus.Counter seriesFiltered prometheus.Counter seriesSkipped prometheus.Counter @@ -42,6 +43,12 @@ func newQuerierMetrics(registerer prometheus.Registerer, namespace, subsystem st Name: "chunks_filtered_total", Help: "Total amount of chunks that have been filtered out. Does not count chunks in failed requests.", }), + chunksSkipped: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "chunks_skipped_total", + Help: "Total amount of chunks that have been skipped and returned unfiltered, because no block matched the series.", + }), seriesTotal: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -137,6 +144,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from } } + var skippedGrps [][]*logproto.GroupedChunkRefs responses := make([][]*logproto.GroupedChunkRefs, 0, 2) // We can perform requests sequentially, because most of the time the request // only covers a single day, and if not, it's at most two days. @@ -152,9 +160,19 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from return nil, err } - // add chunk refs from series that were not mapped to any blocks + skippedGrps = append(skippedGrps, skipped) responses = append(responses, refs, skipped) - bq.metrics.seriesSkipped.Add(float64(len(skipped))) + } + + // add chunk refs from series that were not mapped to any blocks + skippedDeduped, err := mergeSeries(skippedGrps, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to dedupe skipped series") + } + + var chunksSkipped int + for _, skippedSeries := range skippedDeduped { + chunksSkipped += len(skippedSeries.Refs) } deduped, err := mergeSeries(responses, nil) @@ -185,15 +203,19 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from "responses", len(responses), "preFilterChunks", preFilterChunks, "postFilterChunks", postFilterChunks, + "skippedChunks", chunksSkipped, "filteredChunks", preFilterChunks-postFilterChunks, "preFilterSeries", preFilterSeries, "postFilterSeries", postFilterSeries, + "skippedSeries", len(skippedDeduped), "filteredSeries", preFilterSeries-postFilterSeries, ) bq.metrics.chunksTotal.Add(float64(preFilterChunks)) + bq.metrics.chunksSkipped.Add(float64(chunksSkipped)) bq.metrics.chunksFiltered.Add(float64(preFilterChunks - postFilterChunks)) bq.metrics.seriesTotal.Add(float64(preFilterSeries)) + bq.metrics.seriesSkipped.Add(float64(len(skippedDeduped))) bq.metrics.seriesFiltered.Add(float64(preFilterSeries - postFilterSeries)) return result, nil diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go index 9617202b948c3..df3a93fcafeda 100644 --- a/pkg/bloomgateway/util.go +++ b/pkg/bloomgateway/util.go @@ -2,6 +2,7 @@ package bloomgateway import ( "sort" + "time" "github.com/prometheus/common/model" "golang.org/x/exp/slices" @@ -102,38 +103,30 @@ func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto fromDay, throughDay := truncateDay(from), 
truncateDay(through) - for day := fromDay; day.Equal(throughDay) || day.Before(throughDay); day = day.Add(Day) { + // because through is exclusive, if it's equal to the truncated day, it means it's the start of the day + // and we should not include it in the range + if through.Equal(throughDay) { + throughDay = throughDay.Add(-24 * time.Hour) + } + + for day := fromDay; !throughDay.Before(day); day = day.Add(Day) { minTs, maxTs := model.Latest, model.Earliest - nextDay := day.Add(Day) res := make([]*logproto.GroupedChunkRefs, 0, len(seriesWithChunks)) for _, series := range seriesWithChunks { chunks := series.Refs - min := sort.Search(len(chunks), func(i int) bool { - return chunks[i].From >= day - }) - - max := sort.Search(len(chunks), func(i int) bool { - return chunks[i].From >= nextDay - }) + var relevantChunks []*logproto.ShortRef + minTs, maxTs, relevantChunks = overlappingChunks(day, day.Add(Day), minTs, maxTs, chunks) - // All chunks fall outside of the range - if min == len(chunks) || max == 0 || min == max { + if len(relevantChunks) == 0 { continue } - if chunks[min].From < minTs { - minTs = chunks[min].From - } - if chunks[max-1].Through > maxTs { - maxTs = chunks[max-1].Through - } - res = append(res, &logproto.GroupedChunkRefs{ Fingerprint: series.Fingerprint, Tenant: series.Tenant, - Refs: chunks[min:max], + Refs: relevantChunks, }) } @@ -152,3 +145,28 @@ func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto return result } + +func overlappingChunks(from, through, minTs, maxTs model.Time, chunks []*logproto.ShortRef) (model.Time, model.Time, []*logproto.ShortRef) { + + // chunks are ordered first by `From`. Can disregard all chunks + // that start later than the search range ends + maxIdx := sort.Search(len(chunks), func(i int) bool { + return chunks[i].From > through + }) + + res := make([]*logproto.ShortRef, 0, len(chunks[:maxIdx])) + + for _, chunk := range chunks[:maxIdx] { + // if chunk ends before the search range starts, skip + if from.After(chunk.Through) { + continue + } + + // Bound min & max ranges to the search range + minTs = max(min(minTs, chunk.From), from) + maxTs = min(max(maxTs, chunk.Through), through) + res = append(res, chunk) + } + + return minTs, maxTs, res +} diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go index a3f219c326efd..849f3a30bbfc0 100644 --- a/pkg/bloomgateway/util_test.go +++ b/pkg/bloomgateway/util_test.go @@ -201,7 +201,7 @@ func TestPartitionRequest(t *testing.T) { { Fingerprint: 0x00, Refs: []*logproto.ShortRef{ - {From: ts.Add(-13 * time.Hour), Through: ts.Add(-12 * time.Hour)}, + {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)}, {From: ts.Add(13 * time.Hour), Through: ts.Add(14 * time.Hour)}, }, }, @@ -306,35 +306,69 @@ func TestPartitionRequest(t *testing.T) { { Fingerprint: 0x00, Refs: []*logproto.ShortRef{ - {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)}, - {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, - {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)}, + {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)}, // previous day + {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, // previous & target day + {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)}, // target day }, }, }, }, exp: []seriesWithInterval{ + // previous day { - interval: bloomshipper.Interval{Start: ts.Add(-14 * time.Hour), End: ts.Add(-11 * time.Hour)}, + interval: bloomshipper.Interval{Start: 
ts.Add(-14 * time.Hour), End: ts.Add(-12 * time.Hour)}, day: config.NewDayTime(mktime("2024-01-23 00:00")), series: []*logproto.GroupedChunkRefs{ { Fingerprint: 0x00, Refs: []*logproto.ShortRef{ - {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)}, - {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, + {From: ts.Add(-14 * time.Hour), Through: ts.Add(-13 * time.Hour)}, // previous day + {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, // previous & target day + }, + }, + }, + }, + // target day + { + interval: bloomshipper.Interval{Start: ts.Add(-12 * time.Hour), End: ts.Add(-10 * time.Hour)}, + day: config.NewDayTime(mktime("2024-01-24 00:00")), + series: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 0x00, + Refs: []*logproto.ShortRef{ + {From: ts.Add(-13 * time.Hour), Through: ts.Add(-11 * time.Hour)}, // previous & target day + {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)}, // target day }, }, }, }, + }, + }, + + "through target day inclusion": { + inp: &logproto.FilterChunkRefRequest{ + // Only search for the target day, but ensure chunks whose through (but not from) + // is on the target day are included + From: ts.Add(-1 * time.Hour), + Through: ts, + Refs: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 0x00, + Refs: []*logproto.ShortRef{ + {From: ts.Add(-13 * time.Hour), Through: ts.Add(-1 * time.Hour)}, // previous & target day + }, + }, + }, + }, + exp: []seriesWithInterval{ { - interval: bloomshipper.Interval{Start: ts.Add(-11 * time.Hour), End: ts.Add(-10 * time.Hour)}, + interval: bloomshipper.Interval{Start: ts.Add(-12 * time.Hour), End: ts.Add(-1 * time.Hour)}, day: config.NewDayTime(mktime("2024-01-24 00:00")), series: []*logproto.GroupedChunkRefs{ { Fingerprint: 0x00, Refs: []*logproto.ShortRef{ - {From: ts.Add(-11 * time.Hour), Through: ts.Add(-10 * time.Hour)}, + {From: ts.Add(-13 * time.Hour), Through: ts.Add(-1 * time.Hour)}, // inherited from the chunk }, }, }, @@ -358,13 +392,13 @@ func TestPartitionRequest(t *testing.T) { } } -func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockRef, []bloomshipper.Meta, []*bloomshipper.CloseableBlockQuerier, [][]v1.SeriesWithBloom) { +func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockRef, []bloomshipper.Meta, []*bloomshipper.CloseableBlockQuerier, [][]v1.SeriesWithBlooms) { t.Helper() blockRefs := make([]bloomshipper.BlockRef, 0, n) metas := make([]bloomshipper.Meta, 0, n) queriers := make([]*bloomshipper.CloseableBlockQuerier, 0, n) - series := make([][]v1.SeriesWithBloom, 0, n) + series := make([][]v1.SeriesWithBlooms, 0, n) step := (maxFp - minFp) / model.Fingerprint(n) for i := 0; i < n; i++ { @@ -410,7 +444,7 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, return blockRefs, metas, queriers, series } -func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.SeriesWithBloom, nthSeries int) []*logproto.ChunkRef { +func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.SeriesWithBlooms, nthSeries int) []*logproto.ChunkRef { t.Helper() n := 0 res := make([]*logproto.ChunkRef, 0) @@ -449,3 +483,78 @@ func createBlockRefsFromBlockData(t *testing.T, tenant string, data []*bloomship } return res } + +func TestOverlappingChunks(t *testing.T) { + mkRef := func(from, through model.Time) *logproto.ShortRef { + return 
&logproto.ShortRef{From: from, Through: through} + } + + for _, tc := range []struct { + desc string + from, through model.Time + input []*logproto.ShortRef + exp []*logproto.ShortRef + expMin, expMax model.Time + }{ + { + desc: "simple ordered", + from: 0, through: 10, + input: []*logproto.ShortRef{ + mkRef(0, 2), + mkRef(3, 5), + mkRef(6, 8), + mkRef(10, 12), + mkRef(14, 16), + }, + exp: []*logproto.ShortRef{ + mkRef(0, 2), + mkRef(3, 5), + mkRef(6, 8), + mkRef(10, 12), + }, + expMin: 0, expMax: 10, + }, + { + desc: "refs through timestamps aren't in monotonic order", + from: 0, through: 10, + input: []*logproto.ShortRef{ + mkRef(0, 2), + mkRef(3, 5), + mkRef(6, 8), + mkRef(10, 12), + mkRef(14, 16), + }, + exp: []*logproto.ShortRef{ + mkRef(0, 2), + mkRef(3, 5), + mkRef(6, 8), + mkRef(10, 12), + }, + expMin: 0, expMax: 10, + }, + { + desc: "expMin & expMax are within from/through", + from: 10, through: 20, + input: []*logproto.ShortRef{ + mkRef(0, 2), + mkRef(3, 5), + mkRef(6, 8), + mkRef(14, 16), + mkRef(17, 19), + mkRef(21, 30), + }, + exp: []*logproto.ShortRef{ + mkRef(14, 16), + mkRef(17, 19), + }, + expMin: 14, expMax: 19, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + minTs, maxTs, got := overlappingChunks(tc.from, tc.through, model.Latest, model.Earliest, tc.input) + require.Equal(t, tc.expMin, minTs) + require.Equal(t, tc.expMax, maxTs) + require.Equal(t, tc.exp, got) + }) + } +} diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go index 00c3ba53a2806..ec0660b91bc01 100644 --- a/pkg/distributor/http.go +++ b/pkg/distributor/http.go @@ -23,7 +23,26 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { } func (d *Distributor) OTLPPushHandler(w http.ResponseWriter, r *http.Request) { - d.pushHandler(w, r, push.ParseOTLPRequest) + interceptor := newOtelErrorHeaderInterceptor(w) + d.pushHandler(interceptor, r, push.ParseOTLPRequest) +} + +// otelErrorHeaderInterceptor maps 500 errors to 503. +// According to the OTLP specification, 500 errors are never retried on the client side, but 503 are. 
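+// Returning 503 therefore signals OTLP senders, such as the OpenTelemetry
+// Collector, to retry the batch rather than drop it permanently.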
+type otelErrorHeaderInterceptor struct { + http.ResponseWriter +} + +func newOtelErrorHeaderInterceptor(w http.ResponseWriter) *otelErrorHeaderInterceptor { + return &otelErrorHeaderInterceptor{ResponseWriter: w} +} + +func (i *otelErrorHeaderInterceptor) WriteHeader(statusCode int) { + if statusCode == http.StatusInternalServerError { + statusCode = http.StatusServiceUnavailable + } + + i.ResponseWriter.WriteHeader(statusCode) } func (d *Distributor) pushHandler(w http.ResponseWriter, r *http.Request, pushRequestParser push.RequestParser) { diff --git a/pkg/distributor/http_test.go b/pkg/distributor/http_test.go index 0ecf70fa9a498..b6281b81bf3d7 100644 --- a/pkg/distributor/http_test.go +++ b/pkg/distributor/http_test.go @@ -82,6 +82,38 @@ func TestRequestParserWrapping(t *testing.T) { require.True(t, called) } +func Test_OtelErrorHeaderInterceptor(t *testing.T) { + for _, tc := range []struct { + name string + inputCode int + expectedCode int + }{ + { + name: "500", + inputCode: http.StatusInternalServerError, + expectedCode: http.StatusServiceUnavailable, + }, + { + name: "400", + inputCode: http.StatusBadRequest, + expectedCode: http.StatusBadRequest, + }, + { + name: "204", + inputCode: http.StatusNoContent, + expectedCode: http.StatusNoContent, + }, + } { + t.Run(tc.name, func(t *testing.T) { + r := httptest.NewRecorder() + i := newOtelErrorHeaderInterceptor(r) + + http.Error(i, "error", tc.inputCode) + require.Equal(t, tc.expectedCode, r.Code) + }) + } +} + func stubParser(_ string, _ *http.Request, _ push.TenantsRetention, _ push.Limits, _ push.UsageTracker) (*logproto.PushRequest, *push.Stats, error) { return &logproto.PushRequest{}, &push.Stats{}, nil } diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go index 00aad05475495..81407abcb2e25 100644 --- a/pkg/ingester/flush.go +++ b/pkg/ingester/flush.go @@ -7,7 +7,9 @@ import ( "sync" "time" + "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" @@ -135,8 +137,9 @@ func (i *Ingester) sweepStream(instance *instance, stream *stream, immediate boo } func (i *Ingester) flushLoop(j int) { + l := log.With(i.logger, "loop", j) defer func() { - level.Debug(i.logger).Log("msg", "Ingester.flushLoop() exited") + level.Debug(l).Log("msg", "Ingester.flushLoop() exited") i.flushQueuesDone.Done() }() @@ -147,9 +150,10 @@ func (i *Ingester) flushLoop(j int) { } op := o.(*flushOp) - err := i.flushUserSeries(op.userID, op.fp, op.immediate) + m := util_log.WithUserID(op.userID, l) + err := i.flushOp(m, op) if err != nil { - level.Error(util_log.WithUserID(op.userID, i.logger)).Log("msg", "failed to flush", "err", err) + level.Error(m).Log("msg", "failed to flush", "err", err) } // If we're exiting & we failed to flush, put the failed operation @@ -161,7 +165,23 @@ func (i *Ingester) flushLoop(j int) { } } -func (i *Ingester) flushUserSeries(userID string, fp model.Fingerprint, immediate bool) error { +func (i *Ingester) flushOp(l log.Logger, op *flushOp) error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + b := backoff.New(ctx, i.cfg.FlushOpBackoff) + for b.Ongoing() { + err := i.flushUserSeries(ctx, op.userID, op.fp, op.immediate) + if err == nil { + break + } + level.Error(l).Log("msg", "failed to flush", "retries", b.NumRetries(), "err", err) + b.Wait() + } + return b.Err() +} + +func (i *Ingester) flushUserSeries(ctx context.Context, userID string, 
fp model.Fingerprint, immediate bool) error { instance, ok := i.getInstanceByID(userID) if !ok { return nil @@ -175,9 +195,9 @@ func (i *Ingester) flushUserSeries(userID string, fp model.Fingerprint, immediat lbs := labels.String() level.Info(i.logger).Log("msg", "flushing stream", "user", userID, "fp", fp, "immediate", immediate, "num_chunks", len(chunks), "labels", lbs) - ctx := user.InjectOrgID(context.Background(), userID) - ctx, cancel := context.WithTimeout(ctx, i.cfg.FlushOpTimeout) - defer cancel() + ctx = user.InjectOrgID(ctx, userID) + ctx, cancelFunc := context.WithTimeout(ctx, i.cfg.FlushOpTimeout) + defer cancelFunc() err := i.flushChunks(ctx, fp, labels, chunks, chunkMtx) if err != nil { return fmt.Errorf("failed to flush chunks: %w, num_chunks: %d, labels: %s", err, len(chunks), lbs) diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index 6fd52bafa066f..edd6084a2741b 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -1,6 +1,7 @@ package ingester import ( + "errors" "fmt" "os" "sort" @@ -102,6 +103,67 @@ func Benchmark_FlushLoop(b *testing.B) { } } +func Test_FlushOp(t *testing.T) { + t.Run("no error", func(t *testing.T) { + cfg := defaultIngesterTestConfig(t) + cfg.FlushOpBackoff.MinBackoff = time.Second + cfg.FlushOpBackoff.MaxBackoff = 10 * time.Second + cfg.FlushOpBackoff.MaxRetries = 1 + cfg.FlushCheckPeriod = 100 * time.Millisecond + + _, ing := newTestStore(t, cfg, nil) + + ctx := user.InjectOrgID(context.Background(), "foo") + ins, err := ing.GetOrCreateInstance("foo") + require.NoError(t, err) + + lbs := makeRandomLabels() + req := &logproto.PushRequest{Streams: []logproto.Stream{{ + Labels: lbs.String(), + Entries: entries(5, time.Now()), + }}} + require.NoError(t, ins.Push(ctx, req)) + + time.Sleep(cfg.FlushCheckPeriod) + require.NoError(t, ing.flushOp(gokitlog.NewNopLogger(), &flushOp{ + immediate: true, + userID: "foo", + fp: ins.getHashForLabels(lbs), + })) + }) + + t.Run("max retries exceeded", func(t *testing.T) { + cfg := defaultIngesterTestConfig(t) + cfg.FlushOpBackoff.MinBackoff = time.Second + cfg.FlushOpBackoff.MaxBackoff = 10 * time.Second + cfg.FlushOpBackoff.MaxRetries = 1 + cfg.FlushCheckPeriod = 100 * time.Millisecond + + store, ing := newTestStore(t, cfg, nil) + store.onPut = func(_ context.Context, _ []chunk.Chunk) error { + return errors.New("failed to write chunks") + } + + ctx := user.InjectOrgID(context.Background(), "foo") + ins, err := ing.GetOrCreateInstance("foo") + require.NoError(t, err) + + lbs := makeRandomLabels() + req := &logproto.PushRequest{Streams: []logproto.Stream{{ + Labels: lbs.String(), + Entries: entries(5, time.Now()), + }}} + require.NoError(t, ins.Push(ctx, req)) + + time.Sleep(cfg.FlushCheckPeriod) + require.EqualError(t, ing.flushOp(gokitlog.NewNopLogger(), &flushOp{ + immediate: true, + userID: "foo", + fp: ins.getHashForLabels(lbs), + }), "terminated after 1 retries") + }) +} + func Test_Flush(t *testing.T) { var ( store, ing = newTestStore(t, defaultIngesterTestConfig(t), nil) @@ -297,6 +359,10 @@ func defaultIngesterTestConfig(t testing.TB) Config { cfg := Config{} flagext.DefaultValues(&cfg) + cfg.FlushOpBackoff.MinBackoff = 100 * time.Millisecond + cfg.FlushOpBackoff.MaxBackoff = 10 * time.Second + cfg.FlushOpBackoff.MaxRetries = 1 + cfg.FlushOpTimeout = 15 * time.Second cfg.FlushCheckPeriod = 99999 * time.Hour cfg.MaxChunkIdle = 99999 * time.Hour cfg.ConcurrentFlushes = 1 diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 
41b358906e0a1..1a89aebe6ef9f 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -21,6 +21,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/concurrency" "github.com/grafana/dskit/modules" "github.com/grafana/dskit/multierror" @@ -34,6 +35,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "google.golang.org/grpc/health/grpc_health_v1" + server_util "github.com/grafana/loki/v3/pkg/util/server" + "github.com/grafana/loki/v3/pkg/analytics" "github.com/grafana/loki/v3/pkg/chunkenc" "github.com/grafana/loki/v3/pkg/distributor/writefailures" @@ -82,6 +85,7 @@ type Config struct { ConcurrentFlushes int `yaml:"concurrent_flushes"` FlushCheckPeriod time.Duration `yaml:"flush_check_period"` + FlushOpBackoff backoff.Config `yaml:"flush_op_backoff"` FlushOpTimeout time.Duration `yaml:"flush_op_timeout"` RetainPeriod time.Duration `yaml:"chunk_retain_period"` MaxChunkIdle time.Duration `yaml:"chunk_idle_period"` @@ -127,7 +131,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushes", 32, "How many flushes can happen concurrently from each stream.") f.DurationVar(&cfg.FlushCheckPeriod, "ingester.flush-check-period", 30*time.Second, "How often should the ingester see if there are any blocks to flush. The first flush check is delayed by a random time up to 0.8x the flush check period. Additionally, there is +/- 1% jitter added to the interval.") - f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 10*time.Minute, "The timeout before a flush is cancelled.") + f.DurationVar(&cfg.FlushOpBackoff.MinBackoff, "ingester.flush-op-backoff-min-period", 10*time.Second, "Minimum backoff period when a flush fails. Each concurrent flush has its own backoff, see `ingester.concurrent-flushes`.") + f.DurationVar(&cfg.FlushOpBackoff.MaxBackoff, "ingester.flush-op-backoff-max-period", time.Minute, "Maximum backoff period when a flush fails. Each concurrent flush has its own backoff, see `ingester.concurrent-flushes`.") + f.IntVar(&cfg.FlushOpBackoff.MaxRetries, "ingester.flush-op-backoff-retries", 10, "Maximum retries for failed flushes.") + f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 10*time.Minute, "The timeout for an individual flush. Will be retried up to `flush-op-backoff-retries` times.") f.DurationVar(&cfg.RetainPeriod, "ingester.chunks-retain-period", 0, "How long chunks should be retained in-memory after they've been flushed.") f.DurationVar(&cfg.MaxChunkIdle, "ingester.chunks-idle-period", 30*time.Minute, "How long chunks should sit in-memory with no updates before being flushed if they don't hit the max block size. 
This means that half-empty chunks will still be flushed after a certain period as long as they receive no further activity.") f.IntVar(&cfg.BlockSize, "ingester.chunks-block-size", 256*1024, "The targeted _uncompressed_ size in bytes of a chunk block When this threshold is exceeded the head block will be cut and compressed inside the chunk.") @@ -155,6 +162,15 @@ func (cfg *Config) Validate() error { return err } + if cfg.FlushOpBackoff.MinBackoff > cfg.FlushOpBackoff.MaxBackoff { + return errors.New("invalid flush op min backoff: cannot be larger than max backoff") + } + if cfg.FlushOpBackoff.MaxRetries <= 0 { + return fmt.Errorf("invalid flush op max retries: %d", cfg.FlushOpBackoff.MaxRetries) + } + if cfg.FlushOpTimeout <= 0 { + return fmt.Errorf("invalid flush op timeout: %s", cfg.FlushOpTimeout) + } if cfg.IndexShards <= 0 { return fmt.Errorf("invalid ingester index shard factor: %d", cfg.IndexShards) } @@ -1041,6 +1057,13 @@ func (i *Ingester) asyncStoreMaxLookBack() time.Duration { // GetChunkIDs is meant to be used only when using an async store like boltdb-shipper or tsdb. func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) { + gcr, err := i.getChunkIDs(ctx, req) + err = server_util.ClientGrpcStatusAndError(err) + return gcr, err +} + +// GetChunkIDs is meant to be used only when using an async store like boltdb-shipper or tsdb. +func (i *Ingester) getChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) { orgID, err := tenant.TenantID(ctx) if err != nil { return nil, err @@ -1168,6 +1191,12 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp // Series queries the ingester for log stream identifiers (label sets) matching a set of matchers func (i *Ingester) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) { + sr, err := i.series(ctx, req) + err = server_util.ClientGrpcStatusAndError(err) + return sr, err +} + +func (i *Ingester) series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) { instanceID, err := tenant.TenantID(ctx) if err != nil { return nil, err @@ -1331,6 +1360,11 @@ func (i *Ingester) getInstances() []*instance { // Tail logs matching given query func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_TailServer) error { + err := i.tail(req, queryServer) + err = server_util.ClientGrpcStatusAndError(err) + return err +} +func (i *Ingester) tail(req *logproto.TailRequest, queryServer logproto.Querier_TailServer) error { select { case <-i.tailersQuit: return errors.New("Ingester is stopping") @@ -1376,6 +1410,12 @@ func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_ // TailersCount returns count of active tail requests from a user func (i *Ingester) TailersCount(ctx context.Context, _ *logproto.TailersCountRequest) (*logproto.TailersCountResponse, error) { + tcr, err := i.tailersCount(ctx) + err = server_util.ClientGrpcStatusAndError(err) + return tcr, err +} + +func (i *Ingester) tailersCount(ctx context.Context) (*logproto.TailersCountResponse, error) { instanceID, err := tenant.TenantID(ctx) if err != nil { return nil, err @@ -1431,6 +1471,12 @@ func (i *Ingester) GetDetectedFields(_ context.Context, r *logproto.DetectedFiel // GetDetectedLabels returns map of detected labels and unique values from this ingester func (i *Ingester) GetDetectedLabels(ctx context.Context, req 
*logproto.DetectedLabelsRequest) (*logproto.LabelToValuesResponse, error) { + lvr, err := i.getDetectedLabels(ctx, req) + err = server_util.ClientGrpcStatusAndError(err) + return lvr, err +} + +func (i *Ingester) getDetectedLabels(ctx context.Context, req *logproto.DetectedLabelsRequest) (*logproto.LabelToValuesResponse, error) { userID, err := tenant.TenantID(ctx) if err != nil { return nil, err diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 1c438bd6bf2c0..6bb27ad645cc9 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -12,6 +12,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/middleware" @@ -676,57 +677,119 @@ func TestIngester_asyncStoreMaxLookBack(t *testing.T) { func TestValidate(t *testing.T) { for i, tc := range []struct { - in Config - err bool - expected Config + in Config + expected Config + expectedErr string }{ { in: Config{ - MaxChunkAge: time.Minute, ChunkEncoding: chunkenc.EncGZIP.String(), - IndexShards: index.DefaultIndexShards, + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, + FlushOpTimeout: 15 * time.Second, + IndexShards: index.DefaultIndexShards, + MaxChunkAge: time.Minute, }, expected: Config{ + ChunkEncoding: chunkenc.EncGZIP.String(), + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, + FlushOpTimeout: 15 * time.Second, + IndexShards: index.DefaultIndexShards, MaxChunkAge: time.Minute, - ChunkEncoding: chunkenc.EncGZIP.String(), parsedEncoding: chunkenc.EncGZIP, - IndexShards: index.DefaultIndexShards, }, }, { in: Config{ ChunkEncoding: chunkenc.EncSnappy.String(), - IndexShards: index.DefaultIndexShards, + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, + FlushOpTimeout: 15 * time.Second, + IndexShards: index.DefaultIndexShards, }, expected: Config{ - ChunkEncoding: chunkenc.EncSnappy.String(), - parsedEncoding: chunkenc.EncSnappy, + ChunkEncoding: chunkenc.EncSnappy.String(), + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, + FlushOpTimeout: 15 * time.Second, IndexShards: index.DefaultIndexShards, + parsedEncoding: chunkenc.EncSnappy, }, }, { in: Config{ - IndexShards: index.DefaultIndexShards, ChunkEncoding: "bad-enc", + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, + FlushOpTimeout: 15 * time.Second, + IndexShards: index.DefaultIndexShards, + }, + expectedErr: "invalid encoding: bad-enc, supported: none, gzip, lz4-64k, snappy, lz4-256k, lz4-1M, lz4, flate, zstd", + }, + { + in: Config{ + ChunkEncoding: chunkenc.EncGZIP.String(), + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + }, + FlushOpTimeout: 15 * time.Second, + IndexShards: index.DefaultIndexShards, + MaxChunkAge: time.Minute, + }, + expectedErr: "invalid flush op max retries: 0", + }, + { + in: Config{ + ChunkEncoding: chunkenc.EncGZIP.String(), + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, + IndexShards: index.DefaultIndexShards, + MaxChunkAge: time.Minute, }, - err: true, + 
expectedErr: "invalid flush op timeout: 0s", }, { in: Config{ - MaxChunkAge: time.Minute, ChunkEncoding: chunkenc.EncGZIP.String(), + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, + FlushOpTimeout: 15 * time.Second, + MaxChunkAge: time.Minute, }, - err: true, + expectedErr: "invalid ingester index shard factor: 0", }, } { t.Run(fmt.Sprint(i), func(t *testing.T) { err := tc.in.Validate() - if tc.err { - require.NotNil(t, err) - return + if tc.expectedErr != "" { + require.EqualError(t, err, tc.expectedErr) + } else { + require.NoError(t, err) + require.Equal(t, tc.expected, tc.in) } - require.Nil(t, err) - require.Equal(t, tc.expected, tc.in) }) } } diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index 7f1ec78601fff..ecef3f10347b8 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -49,6 +49,7 @@ import ( "github.com/grafana/loki/v3/pkg/util/deletion" util_log "github.com/grafana/loki/v3/pkg/util/log" mathutil "github.com/grafana/loki/v3/pkg/util/math" + server_util "github.com/grafana/loki/v3/pkg/util/server" "github.com/grafana/loki/v3/pkg/validation" ) @@ -441,6 +442,12 @@ func (i *instance) getLabelsFromFingerprint(fp model.Fingerprint) labels.Labels } func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) { + it, err := i.query(ctx, req) + err = server_util.ClientGrpcStatusAndError(err) + return it, err +} + +func (i *instance) query(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) { expr, err := req.LogSelector() if err != nil { return nil, err @@ -495,6 +502,12 @@ func (i *instance) Query(ctx context.Context, req logql.SelectLogParams) (iter.E } func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) { + it, err := i.querySample(ctx, req) + err = server_util.ClientGrpcStatusAndError(err) + return it, err +} + +func (i *instance) querySample(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) { expr, err := req.Expr() if err != nil { return nil, err @@ -556,6 +569,12 @@ func (i *instance) QuerySample(ctx context.Context, req logql.SelectSampleParams // If label matchers are given only the matching streams are fetched from the index. // The label names or values are then retrieved from those matching streams. func (i *instance) Label(ctx context.Context, req *logproto.LabelRequest, matchers ...*labels.Matcher) (*logproto.LabelResponse, error) { + lr, err := i.label(ctx, req, matchers...) 
+ err = server_util.ClientGrpcStatusAndError(err) + return lr, err +} + +func (i *instance) label(ctx context.Context, req *logproto.LabelRequest, matchers ...*labels.Matcher) (*logproto.LabelResponse, error) { if len(matchers) == 0 { var labels []string if req.Values { @@ -709,6 +728,12 @@ func (i *instance) Series(ctx context.Context, req *logproto.SeriesRequest) (*lo } func (i *instance) GetStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) { + isr, err := i.getStats(ctx, req) + err = server_util.ClientGrpcStatusAndError(err) + return isr, err +} + +func (i *instance) getStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) { matchers, err := syntax.ParseMatchers(req.Matchers, true) if err != nil { return nil, err @@ -765,6 +790,12 @@ func (i *instance) GetStats(ctx context.Context, req *logproto.IndexStatsRequest } func (i *instance) GetVolume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) { + vr, err := i.getVolume(ctx, req) + err = server_util.ClientGrpcStatusAndError(err) + return vr, err +} + +func (i *instance) getVolume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) { matchers, err := syntax.ParseMatchers(req.Matchers, true) if err != nil && req.Matchers != seriesvolume.MatchAny { return nil, err diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index 7f7dc30361d6a..3055a7fb0c5b7 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/loki/v3/pkg/logql/log" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/flagext" "github.com/pkg/errors" "github.com/prometheus/common/model" @@ -40,9 +41,15 @@ import ( func defaultConfig() *Config { cfg := Config{ - BlockSize: 512, - ChunkEncoding: "gzip", - IndexShards: 32, + BlockSize: 512, + ChunkEncoding: "gzip", + IndexShards: 32, + FlushOpTimeout: 15 * time.Second, + FlushOpBackoff: backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 1, + }, } if err := cfg.Validate(); err != nil { panic(errors.Wrap(err, "error building default test config")) diff --git a/pkg/logcli/output/loki.go b/pkg/logcli/output/loki.go new file mode 100644 index 0000000000000..ad89311bbcb34 --- /dev/null +++ b/pkg/logcli/output/loki.go @@ -0,0 +1 @@ +package output diff --git a/pkg/logproto/compat.go b/pkg/logproto/compat.go index a11467584b58f..4a296fd8e43b6 100644 --- a/pkg/logproto/compat.go +++ b/pkg/logproto/compat.go @@ -506,6 +506,33 @@ func (m *ShardsRequest) LogToSpan(sp opentracing.Span) { sp.LogFields(fields...) } +func (m *DetectedFieldsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } + +func (m *DetectedFieldsRequest) WithStartEnd(start, end time.Time) definitions.Request { + clone := *m + clone.Start = start + clone.End = end + return &clone +} + +func (m *DetectedFieldsRequest) WithQuery(query string) definitions.Request { + clone := *m + clone.Query = query + return &clone +} + +func (m *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) { + fields := []otlog.Field{ + otlog.String("query", m.GetQuery()), + otlog.String("start", m.Start.String()), + otlog.String("end", m.End.String()), + otlog.String("step", time.Duration(m.Step).String()), + otlog.String("field_limit", fmt.Sprintf("%d", m.FieldLimit)), + otlog.String("line_limit", fmt.Sprintf("%d", m.LineLimit)), + } + sp.LogFields(fields...) 
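
The new `DetectedFieldsRequest` methods here (and the `DetectedLabelsRequest` ones that follow) implement the query-frontend request contract with a copy-on-write idiom: dereference the receiver into a stack copy, mutate the copy, return its address. A standalone illustration of the idiom, using a hypothetical `Request` struct rather than the generated protobuf type:

```go
package main

import (
	"fmt"
	"time"
)

// Request is a hypothetical stand-in for the generated protobuf request types.
type Request struct {
	Query string
	Start time.Time
	End   time.Time
}

// WithStartEnd returns a shallow copy with a narrowed time range; the
// receiver is never mutated, so a middleware can fan out sub-requests safely.
func (m *Request) WithStartEnd(start, end time.Time) *Request {
	clone := *m // value copy; fine while all mutated fields are plain values
	clone.Start = start
	clone.End = end
	return &clone
}

func main() {
	orig := &Request{Query: `{app="foo"}`, Start: time.Unix(0, 0), End: time.Unix(3600, 0)}
	sub := orig.WithStartEnd(time.Unix(0, 0), time.Unix(1800, 0))
	fmt.Println(orig.End.Unix(), sub.End.Unix()) // 3600 1800: original untouched
}
```

The shallow copy is safe here because the overwritten fields are plain values; any pointer or slice fields in the clone would still alias the original.
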
+} + func (m *QueryPatternsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } func (m *QueryPatternsRequest) WithStartEnd(start, end time.Time) definitions.Request { @@ -534,3 +561,33 @@ func (m *QueryPatternsRequest) LogToSpan(sp opentracing.Span) { } sp.LogFields(fields...) } + +func (m *DetectedLabelsRequest) GetStep() int64 { return 0 } + +func (m *DetectedLabelsRequest) GetCachingOptions() (res definitions.CachingOptions) { return } + +func (m *DetectedLabelsRequest) WithStartEnd(start, end time.Time) definitions.Request { + clone := *m + clone.Start = start + clone.End = end + return &clone +} + +func (m *DetectedLabelsRequest) WithQuery(query string) definitions.Request { + clone := *m + clone.Query = query + return &clone +} + +func (m *DetectedLabelsRequest) WithStartEndForCache(start, end time.Time) resultscache.Request { + return m.WithStartEnd(start, end).(resultscache.Request) +} + +func (m *DetectedLabelsRequest) LogToSpan(sp opentracing.Span) { + fields := []otlog.Field{ + otlog.String("query", m.GetQuery()), + otlog.String("start", m.Start.String()), + otlog.String("end", m.End.String()), + } + sp.LogFields(fields...) +} diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index 6e3f18b7cc8e6..e5e80b4d0c172 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -366,14 +366,6 @@ func newLineFilterExpr(ty log.LineMatchType, op, match string) *LineFilterExpr { func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr { right.Ty = left.Ty - if left.Ty == log.LineMatchEqual || left.Ty == log.LineMatchRegexp || left.Ty == log.LineMatchPattern { - left.Or = right - right.IsOrChild = true - return left - } - - // !(left or right) == (!left and !right). - // NOTE: Consider, we have chain of "or", != "foo" or "bar" or "baz" // we parse from right to left, so first time left="bar", right="baz", and we don't know the actual `Ty` (equal: |=, notequal: !=, regex: |~, etc). So // it will have default (0, LineMatchEqual). @@ -385,6 +377,13 @@ func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr { tmp = tmp.Or } + if left.Ty == log.LineMatchEqual || left.Ty == log.LineMatchRegexp || left.Ty == log.LineMatchPattern { + left.Or = right + right.IsOrChild = true + return left + } + + // !(left or right) == (!left and !right). 
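
The reordering inside `newOrLineFilter` here is a bug fix: the `Ty`-propagation loop now runs before the early return, so in a right-to-left parse of `|= "foo" or "bar" or "baz"` every chained filter inherits the leftmost match type instead of keeping the parser's zero-value default (`|=`). A pared-down, runnable model of the propagation (not the real AST types) that shows the effect:

```go
package main

import "fmt"

type matchType int

const (
	lineMatchEqual   matchType = iota // |=
	lineMatchPattern                  // |>
)

// lineFilter is a pared-down model of LineFilterExpr: just the match type
// and the chain of "or" children.
type lineFilter struct {
	ty    matchType
	match string
	or    *lineFilter
}

// orLineFilter mirrors the fixed ordering in newOrLineFilter: the type is
// propagated down the right-hand side's whole "or" chain *before* the
// function decides what to return, so nested children cannot keep the
// parser's zero-value default.
func orLineFilter(left, right *lineFilter) *lineFilter {
	for tmp := right; tmp != nil; tmp = tmp.or {
		tmp.ty = left.ty
	}
	left.or = right
	return left
}

func main() {
	// Models `{app="foo"} |> "foo" or "bar" or "baz"`, which parses right to
	// left: "bar" and "baz" combine first, still carrying the default |= type.
	barBaz := orLineFilter(
		&lineFilter{ty: lineMatchEqual, match: "bar"},
		&lineFilter{ty: lineMatchEqual, match: "baz"},
	)
	root := orLineFilter(&lineFilter{ty: lineMatchPattern, match: "foo"}, barBaz)
	for f := root; f != nil; f = f.or {
		fmt.Println(f.match, f.ty == lineMatchPattern) // all true after the fix
	}
}
```

The `ast_test.go` and `parser_test.go` additions that follow pin exactly this behaviour for three-filter `|=` and `|>` chains.
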
return newNestedLineFilterExpr(left, right) } diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go index 9090fc98b7558..d75ff2d0261b6 100644 --- a/pkg/logql/syntax/ast_test.go +++ b/pkg/logql/syntax/ast_test.go @@ -545,11 +545,18 @@ func Test_FilterMatcher(t *testing.T) { []linecheck{{"foo", false}, {"bar", true}, {"127.0.0.2", true}, {"127.0.0.1", false}}, }, { - `{app="foo"} |> "foo" or "bar"`, + `{app="foo"} |> "<_>foo<_>" or "<_>bar<_>"`, []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "app", "foo"), }, - []linecheck{{"foo", true}, {"bar", true}, {"none", false}}, + []linecheck{{"test foo test", true}, {"test bar test", true}, {"none", false}}, + }, + { + `{app="foo"} |> "<_>foo<_>" or "<_>bar<_>" or "<_>baz<_>"`, + []*labels.Matcher{ + mustNewMatcher(labels.MatchEqual, "app", "foo"), + }, + []linecheck{{"test foo test", true}, {"test bar test", true}, {"test baz test", true}, {"none", false}}, }, { `{app="foo"} !> "foo" or "bar"`, @@ -618,6 +625,18 @@ func TestOrLineFilterTypes(t *testing.T) { _ = newOrLineFilter(left, right) require.Equal(t, tt.ty, right.Ty) + require.Equal(t, tt.ty, left.Ty) + }) + + t.Run("right inherits left's type with multiple or filters", func(t *testing.T) { + f1 := &LineFilterExpr{LineFilter: LineFilter{Ty: tt.ty, Match: "something"}} + f2 := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: "something"}} + f3 := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: "something"}} + + _ = newOrLineFilter(f1, newOrLineFilter(f2, f3)) + require.Equal(t, tt.ty, f1.Ty) + require.Equal(t, tt.ty, f2.Ty) + require.Equal(t, tt.ty, f3.Ty) }) } } diff --git a/pkg/logql/syntax/parser_test.go b/pkg/logql/syntax/parser_test.go index f12309f2b24a5..4c2a85203938b 100644 --- a/pkg/logql/syntax/parser_test.go +++ b/pkg/logql/syntax/parser_test.go @@ -3173,6 +3173,66 @@ var ParseTestCases = []struct { }, }, }, + { + in: `{app="foo"} |= "foo" or "bar" or "baz"`, + exp: &PipelineExpr{ + Left: newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "app", "foo")}), + MultiStages: MultiStageExpr{ + &LineFilterExpr{ + LineFilter: LineFilter{ + Ty: log.LineMatchEqual, + Match: "foo", + }, + Or: newOrLineFilter( + &LineFilterExpr{ + LineFilter: LineFilter{ + Ty: log.LineMatchEqual, + Match: "bar", + }, + IsOrChild: true, + }, + &LineFilterExpr{ + LineFilter: LineFilter{ + Ty: log.LineMatchEqual, + Match: "baz", + }, + IsOrChild: true, + }), + IsOrChild: false, + }, + }, + }, + }, + { + in: `{app="foo"} |> "foo" or "bar" or "baz"`, + exp: &PipelineExpr{ + Left: newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "app", "foo")}), + MultiStages: MultiStageExpr{ + &LineFilterExpr{ + LineFilter: LineFilter{ + Ty: log.LineMatchPattern, + Match: "foo", + }, + Or: newOrLineFilter( + &LineFilterExpr{ + LineFilter: LineFilter{ + Ty: log.LineMatchPattern, + Match: "bar", + }, + IsOrChild: true, + }, + &LineFilterExpr{ + LineFilter: LineFilter{ + Ty: log.LineMatchPattern, + Match: "baz", + }, + IsOrChild: true, + }), + IsOrChild: false, + }, + }, + }, + }, } func TestParse(t *testing.T) { diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go index 4d7c52bebf0c6..784beabb2a876 100644 --- a/pkg/pattern/drain/drain.go +++ b/pkg/pattern/drain/drain.go @@ -25,7 +25,9 @@ package drain import ( "math" "strconv" + "strings" "unicode" + "unsafe" "github.com/hashicorp/golang-lru/v2/simplelru" "github.com/prometheus/common/model" @@ -139,7 +141,7 @@ func DefaultConfig() *Config { // MaxClusterDepth and 
SimTh, the less the chance that there will be // "similar" clusters, but the greater the footprint. SimTh: 0.3, - MaxChildren: 100, + MaxChildren: 15, ParamString: `<_>`, MaxClusters: 300, } @@ -156,22 +158,24 @@ func New(config *Config, metrics *Metrics) *Drain { } d := &Drain{ - config: config, - rootNode: createNode(), - idToCluster: createLogClusterCache(config.MaxClusters, evictFn), - metrics: metrics, - tokenizer: splittingTokenizer{}, // Default to this for now + config: config, + rootNode: createNode(), + idToCluster: createLogClusterCache(config.MaxClusters, evictFn), + metrics: metrics, + tokenizer: newPunctuationTokenizer(), + maxAllowedLineLength: 3000, } return d } type Drain struct { - config *Config - rootNode *Node - idToCluster *LogClusterCache - clustersCounter int - metrics *Metrics - tokenizer LineTokenizer + config *Config + rootNode *Node + idToCluster *LogClusterCache + clustersCounter int + metrics *Metrics + tokenizer LineTokenizer + maxAllowedLineLength int } func (d *Drain) Clusters() []*LogCluster { @@ -183,10 +187,14 @@ func (d *Drain) TrainTokens(tokens []string, stringer func([]string) string, ts } func (d *Drain) Train(content string, ts int64) *LogCluster { - return d.train(d.tokenizer.Tokenize(content), d.tokenizer.Join, ts) + if len(content) > d.maxAllowedLineLength { + return nil + } + tokens, state := d.tokenizer.Tokenize(content) + return d.train(tokens, state, ts) } -func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64) *LogCluster { +func (d *Drain) train(tokens []string, state interface{}, ts int64) *LogCluster { if len(tokens) < 4 { return nil } @@ -196,11 +204,12 @@ func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64) d.clustersCounter++ clusterID := d.clustersCounter matchCluster = &LogCluster{ - Tokens: tokens, - id: clusterID, - Size: 1, - Stringer: stringer, - Chunks: Chunks{}, + Tokens: tokens, + TokenState: state, + id: clusterID, + Size: 1, + Stringer: d.tokenizer.Join, + Chunks: Chunks{}, } matchCluster.append(model.TimeFromUnixNano(ts)) d.idToCluster.Set(clusterID, matchCluster) @@ -219,15 +228,16 @@ func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64) } func (d *Drain) TrainPattern(content string, samples []*logproto.PatternSample) *LogCluster { - tokens := deduplicatePlaceholders(d.tokenizer.Tokenize(content), d.config.ParamString) + tokens, state := d.tokenizer.Tokenize(content) matchCluster := d.treeSearch(d.rootNode, tokens, d.config.SimTh, true) // Match no existing log cluster if matchCluster == nil { d.clustersCounter++ clusterID := d.clustersCounter matchCluster = &LogCluster{ - Tokens: tokens, - id: clusterID, + Tokens: tokens, + TokenState: state, + id: clusterID, } d.idToCluster.Set(clusterID, matchCluster) d.addSeqToPrefixTree(d.rootNode, matchCluster) @@ -241,24 +251,33 @@ func (d *Drain) TrainPattern(content string, samples []*logproto.PatternSample) return matchCluster } -func deduplicatePlaceholders(tokens []string, param string) []string { - if len(tokens) < 2 { - return tokens +func deduplicatePlaceholders(line string, placeholder string) string { + first := strings.Index(line, "<_><_>") + if first == -1 { + return line } - i := 1 - for k := 1; k < len(tokens); k++ { - if tokens[k] != param || tokens[k] != tokens[k-1] { - if i != k { - tokens[i] = tokens[k] + builder := make([]byte, 0, len(line)) + low := 0 + for i := first; i < len(line)-5; i++ { + if line[i:i+len(placeholder)] == placeholder { + high := i + 3 + for ; high < len(line)-2; 
high += 3 { + if line[high:high+len(placeholder)] != placeholder { + break + } } - i++ + builder = append(builder, line[low:i+len(placeholder)]...) + low = high + i = high } } - return tokens[:i] + builder = append(builder, line[low:]...) + + return unsafe.String(unsafe.SliceData(builder), len(builder)) } func (d *Drain) PatternString(c *LogCluster) string { - s := d.tokenizer.Join(deduplicatePlaceholders(c.Tokens, d.config.ParamString)) + s := deduplicatePlaceholders(d.tokenizer.Join(c.Tokens, c.TokenState), d.config.ParamString) if s == d.config.ParamString { return "" } @@ -271,7 +290,7 @@ func (d *Drain) Delete(cluster *LogCluster) { // Match against an already existing cluster. Match shall be perfect (sim_th=1.0). New cluster will not be created as a result of this call, nor any cluster modifications. func (d *Drain) Match(content string) *LogCluster { - contentTokens := d.tokenizer.Tokenize(content) + contentTokens, _ := d.tokenizer.Tokenize(content) matchCluster := d.treeSearch(d.rootNode, contentTokens, 1.0, true) return matchCluster } @@ -413,6 +432,7 @@ func (d *Drain) addSeqToPrefixTree(rootNode *Node, cluster *LogCluster) { // if token not matched in this layer of existing tree. if _, ok = curNode.keyToChildNode[token]; !ok { if !d.hasNumbers(token) { + // No numbers in the token: keep it as a literal child key while there is room if _, ok = curNode.keyToChildNode[d.config.ParamString]; ok { if len(curNode.keyToChildNode) < d.config.MaxChildren { newNode := createNode() @@ -435,6 +455,7 @@ } } } else { + // Token contains numbers: collapse it into the param string path if _, ok = curNode.keyToChildNode[d.config.ParamString]; !ok { newNode := createNode() curNode.keyToChildNode[d.config.ParamString] = newNode diff --git a/pkg/pattern/drain/drain_benchmark_test.go b/pkg/pattern/drain/drain_benchmark_test.go index e03770f613c04..35ec024af138e 100644 --- a/pkg/pattern/drain/drain_benchmark_test.go +++ b/pkg/pattern/drain/drain_benchmark_test.go @@ -39,8 +39,8 @@ func BenchmarkDrain_TrainExtractsPatterns(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { + drain := New(DefaultConfig(), nil) for _, line := range lines { - drain := New(DefaultConfig(), nil) drain.Train(line, 0) } } } diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go index cc16f0b7fd64c..34bcf8b4c12a5 100644 --- a/pkg/pattern/drain/drain_test.go +++ b/pkg/pattern/drain/drain_test.go @@ -4,6 +4,7 @@ import ( "bufio" "fmt" "os" + "strings" "testing" "github.com/stretchr/testify/require" @@ -27,34 +28,34 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { drain: New(DefaultConfig(), nil), inputFile: `testdata/agent-logfmt.txt`, patterns: []string{ - `ts=2024-04-16T15:10:42.556278698Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/grafana/*.log:{app=\"grafana\", conprof=\"true\", container=\"grafana\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, - `ts=2024-04-16T15:10:42.556706613Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hgrun/*.log:{app=\"grafana\", conprof=\"true\",
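
The drain.go changes above swap the default tokenizer for `newPunctuationTokenizer()`, cap trainable lines at 3,000 bytes, lower `MaxChildren` from 100 to 15, and rewrite `deduplicatePlaceholders` to work on the joined pattern string, collapsing each run of consecutive `<_>` placeholders into one (the `unsafe.String` call merely avoids re-copying the built byte slice). A deliberately simple reference version of that collapsing step, assuming only the standard library:

```go
package main

import (
	"fmt"
	"strings"
)

// collapsePlaceholders keeps one "<_>" per consecutive run. A readable
// reference version of the idea; the production loop instead copies byte
// ranges manually (and uses unsafe.String) to avoid repeated allocations.
func collapsePlaceholders(line, placeholder string) string {
	double := placeholder + placeholder
	for strings.Contains(line, double) {
		line = strings.ReplaceAll(line, double, placeholder)
	}
	return line
}

func main() {
	fmt.Println(collapsePlaceholders("ts=<_><_><_> caller=<_> msg=done", "<_>"))
	// Output: ts=<_> caller=<_> msg=done
}
```

The sweeping rewrites of the expected patterns in drain_test.go around this point are the downstream effect of those tokenizer and tree-shape changes, re-recorded against the same testdata files.
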
container=\"hgrun\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, - `ts=2024-04-16T15:10:42.556930066Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hg-plugins/*.log:{app=\"grafana\", conprof=\"true\", container=\"hg-plugins\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, - `ts=2024-04-16T15:10:42.557102408Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*b92ee988-5c26-4c64-bba3-ff6a01723759/hosted-grafana-security/*.log:{app=\"grafana\", conprof=\"true\", container=\"hosted-grafana-security\", instanceId=\"i1111\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"orgnamehere\", plan=\"free\", pod=\"orgnamehere-grafana-7c65678f86-9zhlb\", pod_template_hash=\"7c65678f86\", resource_version=\"143638246\", slug=\"orgnamehere\", stackId=\"866772\"}"`, + `ts=2024-04-16T15:10:42.<_> level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.<_> duration=<_>.<_>`, `ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", batch_kubernetes_io_job_name=\"testcoordinator-job-2665838\", container=\"testcoordinator\", controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", job=\"k6-cloud/testcoordinator\", job_name=\"testcoordinator-job-2665838\", name=\"testcoordinator\", namespace=\"k6-cloud\", pod=\"testcoordinator-job-2665838-9g8ds\"}"`, - `ts=2024-04-16T15:10:43.551543875Z caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key="/var/log/pods/*35649bfd-52ff-4281-9294-5f65fd5a89fc/marketplaces-api/*.log:{container=\"marketplaces-api\", job=\"grafana-com/marketplaces-api\", name=\"marketplaces-api\", namespace=\"grafana-com\", pod=\"marketplaces-api-f67ff7567-gqrvb\", pod_template_hash=\"f67ff7567\"}"`, - `ts=<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget:watcher closed, tailer stopped, positions saved" path=<_>`, - `ts=<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=<_>`, - `ts=<_> caller=filetarget.go:326 level=info component=logs logs_config=default msg="removing directory from watcher" directory=<_>`, - `ts=<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=<_> op=CREATE`, - `ts=<_> caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key=<_> \"kube-proxy\", container=\"kube-proxy\", job=<_> namespace=\"kube-system\", pod=\"kube-proxy-gke-ops-us-east-0-main-n2s32-1-1dd39c-32ae1dde-hmhw\", tier=\"node\"}"`, - `ts=<_> 
caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"grafana\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`, - `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"hg-plugins\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`, - `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"hgrun\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`, - `ts=<_> caller=filetargetmanager.go:397 level=info component=logs logs_config=default msg="Removing target" key=<_> \"grafana\", conprof=\"true\", container=\"hosted-grafana-security\", instanceId=<_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=<_> plan=\"free\", pod=<_> pod_template_hash=<_> resource_version=<_> slug=<_> stackId=<_>`, - `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file <_> ..."`, - `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked <_> - &{Offset:0 Whence:0}"`, - `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened <_>`, - `ts=<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for <_> to appear..."`, - `ts=<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="bufio.Scanner:token too long"`, - `ts=<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="logfmt syntax error at pos <_> on line 1:unexpected '\"'"`, - `ts=<_> caller=tailer.go:118 level=info component=logs logs_config=default component=tailer msg="position timer:exited" path=<_>`, - `ts=<_> caller=tailer.go:147 level=info component=logs logs_config=default component=tailer msg="tail routine:started" path=<_>`, - `ts=<_> caller=tailer.go:155 level=info component=logs logs_config=default component=tailer msg="tail routine:exited" path=<_>`, - `ts=<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg="tail routine:tail channel closed, stopping tailer" path=<_> reason=null`, - `ts=<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg="skipping update of position for a file which does not currently exist" path=<_>`, - `ts=<_> caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" path=<_>`, - `ts=<_> level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=<_> duration=<_>`, + `ts=2024-04-16T15:10:43.551782223Z caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" 
path=/var/log/pods/grafana-com_marketplaces-api-f67ff7567-gqrvb_35649bfd-52ff-4281-9294-5f65fd5a89fc/marketplaces-api/0.log`, + `ts=2024-04-16T15:10:43.<_> caller=filetargetmanager.go:<_> level=info component=logs logs_config=default msg="<_> target" key="/var/log/pods/*<_>/<_>/*.log:{<_>=\"<_>\", <_>=\"<_><_><_><_><_><_> <_><_><_><_><_>\", namespace=\"<_>\", pod=\"<_>\", <_>=\"<_>\"}"`, + `ts=2024-04-16T15:10:43.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_><_> <_> <_> <_><_> <_> <_><_> <_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_> <_><_><_>`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget: watcher closed, tailer stopped, positions saved" path=/var/log/pods/*<_>/<_>/*.log`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=/var/log/pods/<_>/<_>`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" directory=/var/log/pods/hosted-grafana_.<_>/<_>`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetarget.go:326 level=info component=logs logs_config=default msg="removing directory from watcher" directory=/var/log/pods/hosted-grafana_.<_>/<_>`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/<_>/<_>/<_>.log op=CREATE`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/<_><_><_>/<_><_><_>.<_> op=CREATE`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/<_><_><_>/<_><_><_>.<_>.<_> op=CREATE`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" name=/var/log/pods/hosted-grafana_.<_>/<_>/0.log.<_>.<_> op=CREATE`, + `ts=2024-04-16T15:10:<_>.<_> caller=filetargetmanager.go:<_> level=info component=logs logs_config=default msg="<_> target" key="/var/log/pods/*<_>/<_>/*.log:{app=\"grafana\", conprof=\"true\", container=\"<_>\", instanceId=\"<_>\", job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", org=\"<_>\", plan=\"free\", pod=\"<_>\", pod_template_hash=\"<_>\", resource_version=\"<_>\", slug=\"<_>\", stackId=\"<_>\"}"`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file /var/log/pods/<_>/<_>/<_>.log ..."`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Re-opening moved/deleted file /var/log/pods/hosted-grafana_.<_>/<_>/0.log ..."`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked /var/log/pods/<_>/<_>/0.log - &{Offset:0 Whence:0}"`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Seeked /var/log/pods/hosted-grafana_.<_>/<_>/0.log - &{Offset:0 Whence:0}"`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened /var/log/pods/<_>/<_>/<_>.log"`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully 
reopened /var/log/pods/hosted-grafana_.<_>/<_>/0.log"`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for /var/log/pods/<_>/<_>/0.log to appear..."`, + `ts=2024-04-16T15:10:<_>.<_> caller=log.go:168 component=logs logs_config=default level=info msg="Waiting for /var/log/pods/hosted-grafana_.<_>/<_>/0.log to appear..."`, + `ts=2024-04-16T15:10:<_>.<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="bufio.Scanner: token too long"`, + `ts=2024-04-16T15:10:<_>.<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="logfmt syntax error at pos <_> on line 1: unexpected '\"'"`, + `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:245 level=info component=logs logs_config=default component=tailer msg="stopped tailing file" path=/var/log/pods/hosted-grafana_.<_>/<_>/0.log`, + `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_>: <_>" path=/var/log/pods/<_>/<_>/0.log`, + `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_>: <_>" path=/var/log/pods/hosted-grafana_.<_>/<_>/0.log`, + `ts=2024-04-16T15:10:<_>.<_> caller=tailer.go:<_> level=info component=logs logs_config=default component=tailer msg="<_> <_><_> <_> <_> <_><_> <_> <_><_> <_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_><_> <_><_><_>`, }, }, { @@ -62,126 +63,103 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { inputFile: `testdata/ingester-logfmt.txt`, patterns: []string{ `ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg="GET /debug/pprof/delta_mutex (200) 1.161082ms"`, - `ts=<_> caller=head.go:216 level=debug tenant=987678 msg="profile is empty after delta computation" metricName=memory`, - `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /ingester.v1.IngesterService/Push (200) <_>`, + `ts=2024-04-17T09:52:46.<_> caller=head.go:216 level=debug tenant=987678 msg="profile is empty after delta computation" metricName=memory`, + `ts=2024-04-17T09:52:46.<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /ingester.v1.IngesterService/Push (200) <_>.<_>"`, }, }, { drain: New(DefaultConfig(), nil), inputFile: `testdata/drone-json.txt`, patterns: []string{ - `{"duration":<_> "debug","method":"GET","msg":"request completed","referer":"","remote":"10.136.105.40:52702","request":"/metrics","status":200,"time":<_> <_> <_> "GrafanaAgent/v0.40.3 (flow; linux; helm)"}`, - `{"id":<_> "debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":<_> <_> <_>`, - `{"id":<_> "debug","msg":"calculate server capacity","time":<_> <_> <_>`, - `{"id":<_> "debug","msg":"calculate unfinished jobs","time":<_> <_> <_>`, - `{"id":<_> "debug","msg":"check capacity complete","time":<_> <_> <_>`, - `{"id":<_> "debug","msg":"no capacity changes required","time":<_> <_> <_>`, + `{"duration":<_>,"level":"debug","method":"GET","msg":"request completed","referer":"","remote":"10.136.105.40:52702","request":"/metrics","status":200,"time":"<_>:<_>:<_>","user-agent":"GrafanaAgent/v0.40.3 (flow; linux; helm)"}`, + `{"id":"<_>","level":"debug","max-pool":4,"min-pool":0,"msg":"check 
capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"<_>:<_>:<_>"}`, + `{"id":"<_>","level":"debug","msg":"calculate server capacity","time":"<_>:<_>:<_>"}`, + `{"id":"<_>","level":"debug","msg":"calculate unfinished jobs","time":"<_>:<_>:<_>"}`, + `{"id":"<_>","level":"debug","msg":"check capacity complete","time":"<_>:<_>:<_>"}`, + `{"id":"<_>","level":"debug","msg":"no capacity changes required","time":"<_>:<_>:<_>"}`, }, }, { drain: New(DefaultConfig(), nil), inputFile: "testdata/distributor-logfmt.txt", patterns: []string{ - `ts=2024-05-02T12:17:22.115385619Z caller=http.go:194 level=debug traceID=7836a12bb7f1964e orgID=75 msg="POST /ingest?aggregationType=sum&from=1714652227107641016&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=100&spyName=gospy&units=samples&until=1714652242109516917 (200) 1.562143ms"`, - `ts=2024-05-02T12:17:22.242343806Z caller=http.go:194 level=debug traceID=404c6a83a18e66a4 orgID=75 msg="POST /ingest?aggregationType=average&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=0&spyName=gospy&units=goroutines&until=1714652242232506798 (200) 2.902485ms"`, - `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=<_> 0&spyName=scrape&units=samples&until=1714652240 (200) <_>`, - `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=<_> gospy&units=&until=1714652242232506798 (200) <_>`, - `ts=<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /push.v1.PusherService/Push <_> <_>`, + `ts=2024-05-02T12:17:22.851228301Z caller=http.go:194 level=debug traceID=1e1fe5ba1756bc38 orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=flamegraph.com%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%3D10.0.11.146%3A8001%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%3Dflamegraph-com-backend-79c858c7bf-jw2hn%2Cpod_template_hash%3D79c858c7bf%2Cpyroscope_tenant%3Dpyroscope%2Ctier%3Dbackend%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) 22.345191ms"`, + `ts=2024-05-02T12:17:22.<_> caller=http.go:194 level=debug traceID=<_> orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=<_>&spyName=gospy&units=&until=1714652242232506798 (200) <_>.<_>"`, + `ts=2024-05-02T12:17:22.<_> caller=http.go:194 level=debug traceID=<_> orgID=75 msg="POST /ingest?aggregationType=<_>&from=<_>&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=<_>&spyName=gospy&units=<_>&until=<_> (200) <_>.<_>"`, + `ts=2024-05-02T12:17:<_>.<_> caller=http.go:194 level=debug traceID=<_> orgID=1819 msg="POST 
/pyroscope/ingest?aggregationType=sum&from=1714652230&name=flamegraph.com.frontend%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%3D10.0.9.115%3A9091%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%3Dflamegraph-com-frontend-6fb87f8785-pd87k%2Cpod_template_hash%3D6fb87f8785%2Cpyroscope_tenant%3Dpyroscope%2Ctier%3Dfrontend%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) <_>.<_>"`, + `ts=2024-05-02T12:17:<_>.<_> caller=http.go:194 level=debug traceID=<_> orgID=<_> msg="POST /push.v1.PusherService/Push (<_>) <_>.<_>"`, }, }, { drain: New(DefaultConfig(), nil), inputFile: "testdata/journald.txt", patterns: []string{ - ` exec /bin/hgrun -log.level=debug launch -bundledPluginsManifest /proc/$(pidof plugins-pause)/root/manifest.json -bundledPluginsDir /proc/$(pidof plugins-pause)/root/plugins],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:10000,Protocol:TCP,HostIP:,},ContainerPort{Name:profiling,HostPort:0,ContainerPort:6060,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:HG_API,Value:http://hosted-grafana-api,ValueFrom:nil,},EnvVar{Name:HG_INSTANCE_SLUG,Value:<_> nil,},EnvVar{Name:HG_INSTANCE_SECRET,Value:<_> nil,},EnvVar{Name:EXTRA_OPTIONS,Value:-profile -profile-port=6060 -profile-addr=0.0.0.0,ValueFrom:nil,},EnvVar{Name:HG_CREATE_TIME_MS,Value:<_> nil,},EnvVar{Name:HG_PULL_POLICY,Value:Always,ValueFrom:nil,},EnvVar{Name:HG_START_REASON,Value:active,ValueFrom:nil,},EnvVar{Name:HGRUN_SECURE_PLUGINS,Value:false,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_RUNNER_ROOT_CA,Value:false,ValueFrom:nil,},EnvVar{Name:OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,Value:http://jaeger-agent.jaeger.svc.cluster.local:4317,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_PARAM,Value:1,ValueFrom:nil,},EnvVar{Name:OTEL_RESOURCE_ATTRIBUTES,Value:cluster=dev-us-central-0,namespace=hosted-grafana,ValueFrom:nil,},EnvVar{Name:HG_PROBE_PATH,Value:/api/health,ValueFrom:nil,},EnvVar{Name:HGRUN_EXIT_ON_PLUGIN_FAIL,Value:true,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_RETRIES,Value:2,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_CONCURRENCY,Value:1,ValueFrom:nil,},EnvVar{Name:HGRUN_LAUNCH_TIMEOUT,Value:3m0s,ValueFrom:nil,},EnvVar{Name:GOMEMLIMIT,Value:429496730,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{26 -3} {} 26m DecimalSI},memory: {{293601280 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/api/health,Port:{0 80 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:10,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/bin/hgrun check],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/hgrun drain -timeout 1m0s -waitTime 
55s],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[SYS_PTRACE],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImagePull: [rpc error: code =NotFound desc =failed to pull and unpack image "us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found, failed to pull and unpack image "us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden]`, ` ln --force -s /proc/$(pidof hgrun-pause)/root/bin/hgrun /bin/hgrun;`, ` while [ "$(pidof plugins-pause)" = "" ]; do sleep 0.5; done;`, ` ts=2024-05-07T11:59:32.025687537Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request`, - ` ts=2024-05-07T11:59:<_> level=error caller=http_client.go:56 app=hgrun <_> msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health`, + ` ts=2024-05-07T11:59:<_>.<_> level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.<_> msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health`, `2024-05-07T11:59:43.484606Z INFO ExtHandler ExtHandler Downloading agent manifest`, - `2024-05-07T11:59:<_> INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript`, - `<_> Consumed <_> CPU time.`, - `<_> Deactivated successfully.`, + `2024-05-07T11:59:<_>.<_> INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript`, + `<_>.scope: Consumed <_>.<_> CPU time.`, + `<_>.scope: Deactivated successfully.`, `AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=<_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`, - `E0507 11:59:29.725681 3089 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"azure-resourcemanager-exporter\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=azure-resourcemanager-exporter pod=azure-resourcemanager-exporter-6b5b58c666-rsttd_infra-exporters(5a95f801-309c-4f33-864a-406262c6ece6)\"" pod="infra-exporters/azure-resourcemanager-exporter-6b5b58c666-rsttd" podUID="5a95f801-309c-4f33-864a-406262c6ece6"`, - `E0507 11:59:31.554203 4531 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"frontend\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=frontend pod=otel-demo-alt-dev-frontend-79ccf98858-mbj4x_otel-demo-alt(d08e620e-00d0-49f1-a195-820a62e8de8f)\"" pod="otel-demo-alt/otel-demo-alt-dev-frontend-79ccf98858-mbj4x" podUID="d08e620e-00d0-49f1-a195-820a62e8de8f"`, - `E0507 11:59:31.928148 4734 pod_workers.go:1300] "Error syncing pod, 
skipping" err="unmounted volumes=[terraform-drift-detector-data], unattached volumes=[terraform-drift-detector-data], failed to process volumes=[]:context deadline exceeded" pod="terraform-drift-detector/terraform-drift-detector-d68b4c545-jg2vj" podUID="6c607496-ef26-454e-b2f2-4cb75b233fa3"`, - `E0507 11:59:34.856101 4727 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana-render-security\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-security:0.1.181\\\"\"" pod="integration/grafana-render-service-cbff479fc-cj9tp" podUID="0e3114d1-2f3a-49d6-a71d-dbc75050d8e0"`, + `E0507 11:59:31.928148 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[terraform-drift-detector-data], unattached volumes=[terraform-drift-detector-data], failed to process volumes=[]: context deadline exceeded" pod="terraform-drift-detector/terraform-drift-detector-d68b4c545-jg2vj" podUID="6c607496-ef26-454e-b2f2-4cb75b233fa3"`, `E0507 11:59:34.923938 3027 kuberuntime_manager.go:1261] container &Container{Name:mysqld-exporter,Image:prom/mysqld-exporter:v0.13.0,Command:[],Args:[--collect.info_schema.innodb_metrics],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:MYSQL_USER,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:username,Optional:nil,},},},EnvVar{Name:MYSQL_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:password,Optional:nil,},},},EnvVar{Name:MYSQL_HOST,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:endpoint,Optional:nil,},},},EnvVar{Name:MYSQL_PORT,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:port,Optional:nil,},},},EnvVar{Name:MYSQL_TLS_MODE,Value:preferred,ValueFrom:nil,},EnvVar{Name:DATA_SOURCE_NAME,Value:$(MYSQL_USER):$(MYSQL_PASSWORD)@tcp($(MYSQL_HOST):$(MYSQL_PORT))/?tls=$(MYSQL_TLS_MODE),ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzx7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod testcrossplane-exporter-c67cfc58f-vbzl4_crossplane-playground(3d49134d-3378-4ec3-824c-5ff4ea2590a5): CreateContainerConfigError: secret "testcrossplane-user-exporter" not found`, - `E0507 11:59:34.923984 3027 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysqld-exporter\" with CreateContainerConfigError: 
\"secret \\\"testcrossplane-user-exporter\\\" not found\"" pod="crossplane-playground/testcrossplane-exporter-c67cfc58f-vbzl4" podUID="3d49134d-3378-4ec3-824c-5ff4ea2590a5"`, - `E0507 11:59:35.928465 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[custom-grafana-agent], unattached volumes=[], failed to process volumes=[]:context deadline exceeded" pod="loki-dev-010/custom-grafana-agent-856948968f-6jfks" podUID="17b244cc-ecb9-4fbc-beaa-8fa47fafe013"`, - `E0507 11:59:37.252214 4736 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ksm\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=ksm pod=new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2_integration(f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c)\"" pod="integration/new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2" podUID="f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c"`, - `E0507 11:59:39.149450 4729 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-agent\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=cluster-agent pod=appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv_integration(69bc5e6c-0451-443e-af8a-c831871afbb8)\"" pod="integration/appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv" podUID="69bc5e6c-0451-443e-af8a-c831871afbb8"`, - `E0507 11:59:41.375655 4736 kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.12.0,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml -distributor.remote-timeout=10s],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:alloy-otlp.alloy-otlp.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://alloy-otlp.alloy-otlp.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},EnvVar{Name:JAEGER_REPORTER_MAX_QUEUE_SIZE,Value:1000,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-jtnbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-5f56f7846b-fgxdm_ge-metrics-federation(07c06e21-137b-4fdd-b7d3-703f0a567720): CreateContainerConfigError: secret "ruler-alertmanager-token" not found`,
- `E0507 11:59:<_> 4731 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"overrides-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/enterprise-logs:callum-shard-firstlast-08\\\"\"" pod="loki-dev-010/overrides-exporter-98c77fd66-6zj6m" podUID="1ff5bf3e-5856-4f6f-ae04-273f2dee170b"`,
- `E0507 11:59:<_> <_> kuberuntime_manager.go:1256] container &Container{Name:grafana,Image:us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> [/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" ="" ]; do sleep 0.5; done;`,
- `E0507 11:59:<_> <_> kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s -enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {} 250m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:pdc-certs,ReadOnly:true,MountPath:/var/run/secrets/pdc-certs,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:<_> true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImageNeverPull: Container image "us.gcr.io/hosted-grafana/pdc:0.1.415" is not present with pull policy of Never`,
- `E0507 11:59:<_> <_> kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.11.1,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:<_> nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http:<_> 5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:<_> true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> CreateContainerConfigError: secret "ruler-alertmanager-token" not found`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code =NotFound desc =failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found, failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden]\"" <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" <_> <_>`,
- `E0507 11:59:<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" <_> <_>`,
- `E0507 11:59:<_> <_> prober.go:104] "Probe errored" err="rpc error: code =NotFound desc =failed to exec in container: failed to load task: no running task found: task <_> not found: not found" probeType="Readiness" <_> <_> containerName="grafana"`,
- `E0507 11:59:<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code =NotFound desc =failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>`,
- `E0507 11:59:<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code =Unknown desc =failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>`,
- `E0507 11:59:<_> <_> remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code =NotFound desc =an error occurred when try to find container <_> not found" <_>`,
- `E0507 11:59:<_> <_> remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code =NotFound desc =failed to exec in container: failed to load task: no running task found: task <_> not found: not found" <_> cmd=["/bin/hgrun","check"]`,
- `E0507 <_> 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=prometheus pod=bryan-prometheus-0_bryan-prometheus(6dadfe71-eb19-4231-a96e-c64bb5499a1e)\"" pod="bryan-prometheus/bryan-prometheus-0" podUID="6dadfe71-eb19-4231-a96e-c64bb5499a1e"`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"agent\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=agent pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=cortex-gw pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"goldpinger\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=goldpinger pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff:\"back-off <_> restarting failed container=grafana pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff:\"back-off 5m0s restarting failed container=support-agent pod=<_> pod=<_> podUID=<_>`,
- `E0507 <_> <_> prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=<_> actualBytes=10240`,
- `I0507 11:59:29.320184 1537502 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="logs-endpoint-dev-005/kafka-controller-0" secret="" err="secret \"not-needed\" not found"`,
pod="logs-endpoint-dev-005/kafka-controller-0" secret="" err="secret \"not-needed\" not found"`, + `E0507 11:59:35.928465 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[custom-grafana-agent], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="loki-dev-010/custom-grafana-agent-856948968f-6jfks" podUID="17b244cc-ecb9-4fbc-beaa-8fa47fafe013"`, + `E0507 11:59:<_>.<_> <_> kuberuntime_manager.go:1256] container &Container{Name:grafana,Image:us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>,Command:[/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" = "" ]; do sleep 0.5; done;`, + `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with CrashLoopBackOff: \"back-off <_> restarting failed container=<_> pod=<_>(<_>)\"" pod="<_>/<_>" podUID="<_>"`, + `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with CreateContainerConfigError: \"secret \\\"<_>\\\" not found\"" pod="<_>/<_>" podUID="<_>"`, + `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/<_>:<_>.<_>.<_>\\\"\"" pod="<_>/<_>" podUID="<_>"`, + `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"<_>\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/<_>:<_>\\\"\"" pod="<_>/<_>" podUID="<_>"`, + `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code = NotFound desc = failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>: not found, failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\\\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_>.1.<_>: 403 Forbidden]\"" pod="hosted-grafana/<_>" podUID="<_>"`, + `E0507 11:59:<_>.<_> <_> pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="pdc/<_>" podUID="<_>"`, + `E0507 11:59:<_>.<_> <_> prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" probeType="Readiness" pod="hosted-grafana/<_>" podUID="<_>" containerName="grafana"`, + `E0507 11:59:<_>.<_> <_> prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=<_> actualBytes=10240`, + `E0507 11:59:<_>.<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>: not found" 
image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>"`, + `E0507 11:59:<_>.<_> <_> remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_>.1.<_>: 403 Forbidden" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>"`, + `E0507 11:59:<_>.<_> <_> remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found" containerID="<_>"`, + `E0507 11:59:<_>.<_> <_> remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found" containerID="<_>" cmd=["/bin/hgrun","check"]`, `I0507 11:59:31.815514 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hosted-grafana-pro) is not from ACR, return empty authentication`, - `I0507 11:59:32.409568 581823 cache.go:40] re-using cached key and certificate`, - `I0507 11:59:33.422254 1537502 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x28r" status="Running"`, `I0507 11:59:34.518822 3224 kuberuntime_container.go:745] "Killing container with a grace period" pod="hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4" podUID="25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" containerName="hgapi" containerID="containerd://c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" gracePeriod=30`, `I0507 11:59:34.834734 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95j2t\" (UniqueName: \"kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, `I0507 11:59:34.834794 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, `I0507 11:59:34.834835 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"gcs-serviceaccount\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") "`, - `I0507 11:59:34.836955 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs" (OuterVolumeSpecName: "pdc-certs") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "pdc-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""`, `I0507 11:59:34.841404 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t" (OuterVolumeSpecName: "kube-api-access-95j2t") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "kube-api-access-95j2t". 
PluginName "kubernetes.io/projected", VolumeGidValue ""`, - `I0507 11:59:34.841447 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount" (OuterVolumeSpecName: "gcs-serviceaccount") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "gcs-serviceaccount". PluginName "kubernetes.io/secret", VolumeGidValue ""`, - `I0507 11:59:34.854084 4727 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="integration/grafana-render-service-cbff479fc-cj9tp" secret="" err="secret \"us-gcr-io-hosted-grafana\" not found"`, `I0507 11:59:34.936025 3224 reconciler_common.go:300] "Volume detached for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, - `I0507 11:59:37.133005 3782 prober.go:107] "Probe failed" probeType="Readiness" pod="loki-dev-014/loki-dev-014-rollout-operator-58fc68b876-2qhmp" podUID="e6504036-2514-4ecc-b78c-c47061f60c9f" containerName="rollout-operator" probeResult="failure" output="HTTP probe failed with statuscode:500"`, - `I0507 11:59:37.915108 4726 prober.go:107] "Probe failed" probeType="Readiness" pod="agent-management-dev-002/agent-management-api-7ff7b9b9-k9nft" podUID="9893f9ac-f3e4-41fb-8da7-592061d2386c" containerName="agent-management-api" probeResult="failure" output="HTTP probe failed with statuscode:400"`, + `I0507 11:59:34.<_> 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/<_>" (OuterVolumeSpecName: "<_>") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "<_>". PluginName "kubernetes.io/secret", VolumeGidValue ""`, + `I0507 11:59:34.<_> 3224 reconciler_common.go:300] "Volume detached for volume \"<_>\" (UniqueName: \"kubernetes.io/<_>/<_>\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, + `I0507 11:59:37.<_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="<_>/<_>" podUID="<_>" containerName="<_>" probeResult="failure" output="HTTP probe failed with statuscode: <_>"`, `I0507 11:59:38.116658 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hg-plugins) is not from ACR, return empty authentication`, `I0507 11:59:39.168633 2776 kubelet.go:2493] "SyncLoop (probe)" probe="readiness" status="" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q"`, - `I0507 11:59:39.560605 4739 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="logs-endpoint-dev-005/kafka-exporter-766c6757b5-bggf6" secret="" err="secret \"not-needed\" not found"`, - `I0507 11:59:<_> 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication`, - `I0507 11:59:<_> 3224 reconciler_common.go:300] "Volume detached for volume <_> (UniqueName: <_> on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\""`, - `I0507 11:59:<_> 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused"`, - `I0507 11:59:<_> <_> generic.go:334] "Generic (PLEG): container finished" <_> <_> exitCode=1`, - `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop (PLEG): event for pod" <_> event={"ID":<_> "ContainerDied","Data":<_>`, - `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop (PLEG): event for pod" <_> event={"ID":<_> "ContainerStarted","Data":<_>`, - `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop DELETE" source="api" <_>`, - `I0507 11:59:<_> <_> kubelet.go:<_> "SyncLoop REMOVE" source="api" <_>`, - `I0507 11:59:<_> <_> kubelet_getters.go:187] "Pod status updated" <_> status="Running"`, - `I0507 11:59:<_> <_> kubelet_volumes.go:<_> "Cleaned up orphaned pod volumes dir" <_> <_>`, - `I0507 11:59:<_> <_> pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":<_> err="failed to get container status <_> rpc error: code =NotFound desc =an error occurred when try to find container <_> not found"`, - `I0507 11:59:<_> <_> scope.go:117] "RemoveContainer" <_>`, - `I0507 11:59:<_> <_> cache.go:40] re-using cached key and certificate`, - `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod=<_>`, - `I0507 <_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod=<_>`, - `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod=<_> secret="" err="secret \"dockerhub\" not found"`, - `I0507 <_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod=<_> secret="" err="secret \"gcr\" not found"`, - `I0507 <_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod=<_> podUID=<_> containerName="grafana" probeResult="failure" output=<`, - `IPv4: martian source <_> from <_> on dev eth0`, + `I0507 11:59:<_>.<_> 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication`, + `I0507 11:59:<_>.<_> 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused"`, + `I0507 11:59:<_>.<_> <_> generic.go:334] "Generic (PLEG): container finished" podID="<_>" containerID="<_>" exitCode=1`, + `I0507 11:59:<_>.<_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hosted-grafana/<_>"`, + `I0507 11:59:<_>.<_> <_> kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/<_>"`, + `I0507 11:59:<_>.<_> <_> kubelet.go:<_>] "SyncLoop (PLEG): event for pod" pod="<_>/<_>" event={"ID":"<_>","Type":"<_>","Data":"<_>"}`, + `I0507 11:59:<_>.<_> <_> kubelet.go:<_>] "SyncLoop DELETE" source="api" pods=["hosted-grafana/<_>"]`, + `I0507 11:59:<_>.<_> <_> kubelet.go:<_>] "SyncLoop REMOVE" source="api" pods=["hosted-grafana/<_>"]`, + `I0507 11:59:<_>.<_> <_> kubelet_getters.go:187] "Pod status updated" pod="kube-system/<_>" status="Running"`, + `I0507 11:59:<_>.<_> <_> kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="<_>/<_>" secret="" err="secret \"<_>\" not found"`, + `I0507 11:59:<_>.<_> <_> kubelet_volumes.go:<_>] "Cleaned up orphaned pod volumes dir" podUID="<_>" path="/var/lib/kubelet/pods/<_>/volumes"`, + `I0507 11:59:<_>.<_> <_> pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"<_>"} err="failed to get container status \"<_>\": rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found"`, + `I0507 11:59:<_>.<_> <_> prober.go:107] "Probe failed" probeType="Readiness" pod="hosted-grafana/<_>" podUID="<_>" containerName="grafana" probeResult="failure" output=<`, + `I0507 11:59:<_>.<_> <_> scope.go:117] "RemoveContainer" containerID="<_>"`, + `I0507 11:59:<_>.<_> <_> cache.go:40] re-using cached key and certificate`, + `IPv4: martian source 10.132.<_>.<_> from 10.132.<_>.<_>, on dev eth0`, `PRC: Renewing lease on eth0.`, `RCV: Reply message on eth0 from fe80::e9:7eff:fedf:3d37.`, `Removed slice libcontainer container kubepods-burstable-pod25cb986c_3d6c_4ed0_abf3_ee59ed6175f9.slice.`, - `Started libcontainer container <_>`, + `Started cri-containerd-95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9.scope.`, + `Started libcontainer container <_>.`, `XMT: Renew on eth0, interval 9700ms.`, - `XMT: Solicit on eth0, interval <_>`, - `audit:type=1400 <_> apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=<_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`, + `XMT: Solicit on eth0, interval <_>.`, + `audit: type=1400 audit(<_>.<_>:<_>): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=<_> comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined"`, `kauditd_printk_skb: <_> callbacks suppressed`, `ll header: 00000000: 42 01 0a 80 00 <_> 42 01 0a 80 00 01 08 00`, `net_ratelimit: 2 
- `time="2024-05-07T11:59:32.755926053Z" level=info msg="CreateContainer within sandbox \"81e019a0248a0300a328fd59f9939c3eaa1b98aa7f325a7f6e00592633275ef6\" for container &ContainerMetadata{Name:checkoutservice,Attempt:3417,}"`,
+ `run-containerd-io.containerd.runtime.v2.task-k8s.<_>.mount: Deactivated successfully.`,
+ `run-containerd-runc-k8s.io-e5f17d69eee483ec8d43b26d5d628246984ba92f794ee5f3748935f5b6448b9b-runc.6eAyHn.mount: Deactivated successfully.`,
`time="2024-05-07T11:59:34.519591759Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with timeout 30 (s)"`,
`time="2024-05-07T11:59:34.520032214Z" level=info msg="Stop container \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with signal terminated"`,
`time="2024-05-07T11:59:34.591282703Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" returns successfully"`,
@@ -189,34 +167,33 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
`time="2024-05-07T11:59:34.592084495Z" level=info msg="Container to stop \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" must be in running or unknown state, current state \"CONTAINER_EXITED\""`,
`time="2024-05-07T11:59:34.706960850Z" level=info msg="TearDown network for sandbox \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" successfully"`,
`time="2024-05-07T11:59:34.707025668Z" level=info msg="StopPodSandbox for \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" returns successfully"`,
- `time="2024-05-07T11:59:36.177858616Z" level=info msg="CreateContainer within sandbox \"81e019a0248a0300a328fd59f9939c3eaa1b98aa7f325a7f6e00592633275ef6\" for &ContainerMetadata{Name:checkoutservice,Attempt:3417,} returns container id \"95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9\""`,
- `time="2024-05-07T11:59:38.484586527Z" level=error msg="Failed to delete exec process \"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\" for container \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\"" error="ttrpc:closed:unknown"`,
- `time="2024-05-07T11:59:43.941729092Z" level=info msg="CreateContainer within sandbox \"ee9dc07bca79ef7dffe2a6eb326e27236e9e97c35913c7aae16ee0a62632fc25\" for container &ContainerMetadata{Name:cortex-gw,Attempt:1660,}"`,
- `time="2024-05-07T11:59:43.954289531Z" level=info msg="CreateContainer within sandbox \"ee9dc07bca79ef7dffe2a6eb326e27236e9e97c35913c7aae16ee0a62632fc25\" for &ContainerMetadata{Name:cortex-gw,Attempt:1660,} returns container id \"93fa5decd62691912f90c9b27526f5e00183239bfa4d3f4ea8578a7873b9c2b4\""`,
- `time="2024-05-07T11:59:<_> level=error msg="ExecSync for <_> failed" error="rpc error: code =NotFound desc =failed to exec in container: failed to load task: no running task found: task <_> not found: not found"`,
- `time="2024-05-07T11:59:<_> level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed" error="failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> unexpected status from HEAD request to https:<_> 403 Forbidden"`,
- `time="2024-05-07T11:59:<_> level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed" error="rpc error: code =NotFound desc =failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> us.gcr.io/hosted-grafana/hosted-grafana-pro:<_> not found"`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for &ContainerMetadata{Name:grafana,Attempt:<_> returns container id <_>`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for &ContainerMetadata{Name:hgrun,Attempt:0,} returns container id <_>`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for container &ContainerMetadata{Name:grafana,Attempt:<_>`,
- `time="2024-05-07T11:59:<_> level=info msg="CreateContainer within sandbox <_> for container &ContainerMetadata{Name:hgrun,Attempt:0,}"`,
- `time="2024-05-07T11:59:<_> level=info msg="ImageCreate event name:<_> <_> labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
- `time="2024-05-07T11:59:<_> level=info msg="ImageUpdate event name:<_> <_> labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
- `time="2024-05-07T11:59:<_> level=info msg="PullImage \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" returns image reference \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\""`,
- `time="2024-05-07T11:59:<_> level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" returns image reference \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\""`,
- `time="2024-05-07T11:59:<_> level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" with image id \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\", repo tag \"us.gcr.io/hosted-grafana/hgrun:0.1.452\", repo digest \"us.gcr.io/hosted-grafana/hgrun@sha256:b492dbbbee9faf9dba63c9fd89e6f9e148239765454c6a54c4284a2828dec153\", size \"19109699\" in <_>`,
- `time="2024-05-07T11:59:<_> level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" with image id \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\", repo tag \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\", repo digest \"us.gcr.io/hosted-grafana/hosted-grafana-pro@sha256:0853965a142fb95648de3281a7c71de0d05fb51616bc32b523dc2f1da6ca06dc\", size \"173405048\" in <_>`,
- `time=<_> level=error msg="ContainerStatus for <_> failed" error="rpc error:code = NotFound desc = an error occurred when try to find container <_> not found"`,
- `time=<_> level=info msg="PullImage <_>`,
- `time=<_> level=info msg="RemoveContainer for <_>`,
- `time=<_> level=info msg="RemoveContainer for <_> returns successfully"`,
- `time=<_> level=info msg="StartContainer for <_>`,
- `time=<_> level=info msg="StartContainer for <_> returns successfully"`,
- `time=<_> level=info msg="cleaning up dead shim" namespace=k8s.io`,
- `time=<_> level=info msg="shim disconnected" id=<_> namespace=k8s.io`,
- `time=<_> level=info msg="stop pulling image <_> active requests=0, bytes read=<_>`,
- `time=<_> level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io`,
- `time=<_> level=warning msg="cleaning up after shim disconnected" id=<_> namespace=k8s.io`,
+ `time="2024-05-07T11:59:38.117772842Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hg-plugins:2024-05-07-v545244-f51851984\""`,
+ `time="2024-05-07T11:59:38.484586527Z" level=error msg="Failed to delete exec process \"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\" for container \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\"" error="ttrpc: closed: unknown"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="ContainerStatus for \"<_>\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"<_>\": not found"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="ExecSync for \"<_>\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\" failed" error="failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/<_>.1.<_>: 403 Forbidden"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\" failed" error="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>\": us.gcr.io/hosted-grafana/hosted-grafana-pro:<_>.1.<_>: not found"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="CreateContainer within sandbox \"<_>\" for &ContainerMetadata{Name:<_>,Attempt:<_>,} returns container id \"<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="CreateContainer within sandbox \"<_>\" for container &ContainerMetadata{Name:<_>,Attempt:<_>,}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageCreate event name:\"sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/<_>@sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageUpdate event name:\"sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/<_>@sha256:<_>\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="PullImage \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" returns image reference \"sha256:<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="PullImage \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="Pulled image \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\" with image id \"sha256:<_>\", repo tag \"us.gcr.io/hosted-grafana/<_>:<_>.1.<_>\", repo digest \"us.gcr.io/hosted-grafana/<_>@sha256:<_>\", size \"<_>\" in <_>.<_>"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="RemoveContainer for \"<_>\" returns successfully"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="RemoveContainer for \"<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="StartContainer for \"<_>\" returns successfully"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="StartContainer for \"<_>\""`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="cleaning up dead shim" namespace=k8s.io`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="shim disconnected" id=<_> namespace=k8s.io`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="stop pulling image us.gcr.io/hosted-grafana/<_>:<_>.1.<_>: active requests=0, bytes read=<_>"`,
+ `time="2024-05-07T11:59:<_>.<_>" level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io`,
+ `time="2024-05-07T11:59:<_>.<_>" level=warning msg="cleaning up after shim disconnected" id=<_> namespace=k8s.io`,
+ `var-lib-containerd-tmpmounts-containerd\<_>.mount: Deactivated successfully.`,
},
},
{
@@ -224,21 +201,19 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
inputFile: "testdata/kafka.txt",
patterns: []string{
`[2024-05-07 10:55:40,626] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180391157, size=16991045, lastModifiedTime=1715075754780, largestRecordTimestamp=Some(1715075754774)),LogSegment(baseOffset=180393429, size=16997692, lastModifiedTime=1715075760206, largestRecordTimestamp=Some(1715075760186)),LogSegment(baseOffset=180395889, size=16998200, lastModifiedTime=1715075765542, largestRecordTimestamp=Some(1715075765526)),LogSegment(baseOffset=180398373, size=16977347, lastModifiedTime=1715075770515, largestRecordTimestamp=Some(1715075770504)) (kafka.log.LocalLog$)`,
- `[2024-05-07 10:55:40,638] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180400817, size=16997594, lastModifiedTime=1715075775780, largestRecordTimestamp=Some(1715075775771)),LogSegment(baseOffset=180403261, size=16992344, lastModifiedTime=1715075781053, largestRecordTimestamp=Some(1715075781021)),LogSegment(baseOffset=180405723, size=16989895, lastModifiedTime=1715075786205, largestRecordTimestamp=Some(1715075786174)),LogSegment(baseOffset=180408118, size=16998698, lastModifiedTime=1715075791681, largestRecordTimestamp=Some(1715075791673)),LogSegment(baseOffset=180410608, size=16995676, lastModifiedTime=1715075796438, largestRecordTimestamp=Some(1715075796430)),LogSegment(baseOffset=180412733, size=16963278, lastModifiedTime=1715075800534, largestRecordTimestamp=Some(1715075800511)),LogSegment(baseOffset=180414883, size=16984328, lastModifiedTime=1715075805272, largestRecordTimestamp=Some(1715075805230)),LogSegment(baseOffset=180417063, size=16989109, lastModifiedTime=1715075810381, largestRecordTimestamp=Some(1715075810372)),LogSegment(baseOffset=180419267, size=16996871, lastModifiedTime=1715075815153, largestRecordTimestamp=Some(1715075815125)),LogSegment(baseOffset=180421560, size=16988558, lastModifiedTime=1715075819785, largestRecordTimestamp=Some(1715075819763)),LogSegment(baseOffset=180424008, size=16999292, lastModifiedTime=1715075825336, largestRecordTimestamp=Some(1715075825303)),LogSegment(baseOffset=180426459, size=16990595, lastModifiedTime=1715075830839, largestRecordTimestamp=Some(1715075830827)),LogSegment(baseOffset=180428944, size=16995859, lastModifiedTime=1715075835942, largestRecordTimestamp=Some(1715075835904)),LogSegment(baseOffset=180431327, size=16992294, lastModifiedTime=1715075841219, largestRecordTimestamp=Some(1715075841214)),LogSegment(baseOffset=180433867, size=16966736, lastModifiedTime=1715075846443, largestRecordTimestamp=Some(1715075846401)),LogSegment(baseOffset=180436204, size=16894731, lastModifiedTime=1715075853273, largestRecordTimestamp=Some(1715075853244)),LogSegment(baseOffset=180438984, size=16983529,
lastModifiedTime=1715075858911, largestRecordTimestamp=Some(1715075858891)),LogSegment(baseOffset=180441466, size=16996933, lastModifiedTime=1715075863566, largestRecordTimestamp=Some(1715075863554)),LogSegment(baseOffset=180443778, size=16999841, lastModifiedTime=1715075866199, largestRecordTimestamp=Some(1715075866185)),LogSegment(baseOffset=180445367, size=16992471, lastModifiedTime=1715075870385, largestRecordTimestamp=Some(1715075870347)),LogSegment(baseOffset=180447366, size=16999996, lastModifiedTime=1715075875102, largestRecordTimestamp=Some(1715075875091)),LogSegment(baseOffset=180449601, size=16994426, lastModifiedTime=1715075879927, largestRecordTimestamp=Some(1715075879926)),LogSegment(baseOffset=180452079, size=16998020, lastModifiedTime=1715075885293, largestRecordTimestamp=Some(1715075885263)),LogSegment(baseOffset=180454546, size=16992231, lastModifiedTime=1715075890424, largestRecordTimestamp=Some(1715075890409)),LogSegment(baseOffset=180456986, size=16970315, lastModifiedTime=1715075895719, largestRecordTimestamp=Some(1715075895690)),LogSegment(baseOffset=180459366, size=16990785, lastModifiedTime=1715075900996, largestRecordTimestamp=Some(1715075900985)),LogSegment(baseOffset=180461885, size=16996655, lastModifiedTime=1715075905847, largestRecordTimestamp=Some(1715075905841)),LogSegment(baseOffset=180464299, size=16982181, lastModifiedTime=1715075911052, largestRecordTimestamp=Some(1715075911028)),LogSegment(baseOffset=180466821, size=16997630, lastModifiedTime=1715075915962, largestRecordTimestamp=Some(1715075915953)),LogSegment(baseOffset=180468968, size=16995723, lastModifiedTime=1715075920325, largestRecordTimestamp=Some(1715075920308)),LogSegment(baseOffset=180471046, size=16979316, lastModifiedTime=1715075924724, largestRecordTimestamp=Some(1715075924697)),LogSegment(baseOffset=180473259, size=16995238, lastModifiedTime=1715075929645, largestRecordTimestamp=Some(1715075929624)),LogSegment(baseOffset=180475486, size=16988461, lastModifiedTime=1715075934288, largestRecordTimestamp=Some(1715075934283)),LogSegment(baseOffset=180477735, size=16993767, lastModifiedTime=1715075939277, largestRecordTimestamp=Some(1715075939270)),LogSegment(baseOffset=180480095, size=16995409, lastModifiedTime=1715075944639, largestRecordTimestamp=Some(1715075944635)),LogSegment(baseOffset=180482560, size=16992784, lastModifiedTime=1715075949760, largestRecordTimestamp=Some(1715075949760)),LogSegment(baseOffset=180484967, size=16990838, lastModifiedTime=1715075954937, largestRecordTimestamp=Some(1715075954929)),LogSegment(baseOffset=180487377, size=16976794, lastModifiedTime=1715075960151, largestRecordTimestamp=Some(1715075960119)),LogSegment(baseOffset=180489919, size=16997379, lastModifiedTime=1715075965116, largestRecordTimestamp=Some(1715075965085)),LogSegment(baseOffset=180492304, size=16956613, lastModifiedTime=1715075970448, largestRecordTimestamp=Some(1715075970424)),LogSegment(baseOffset=180494832, size=16895640, lastModifiedTime=1715075975354, largestRecordTimestamp=Some(1715075975341)),LogSegment(baseOffset=180496930, size=16998328, lastModifiedTime=1715075979813, largestRecordTimestamp=Some(1715075979796)),LogSegment(baseOffset=180499079, size=16995699, lastModifiedTime=1715075984309, largestRecordTimestamp=Some(1715075984285)),LogSegment(baseOffset=180501183, size=16993785, lastModifiedTime=1715075989086, largestRecordTimestamp=Some(1715075989064)),LogSegment(baseOffset=180503431, size=16989600, lastModifiedTime=1715075993713, 
largestRecordTimestamp=Some(1715075993683)),LogSegment(baseOffset=180505674, size=16984790, lastModifiedTime=1715075998337, largestRecordTimestamp=Some(1715075998318)),LogSegment(baseOffset=180508022, size=16982630, lastModifiedTime=1715076003671, largestRecordTimestamp=Some(1715076003660)),LogSegment(baseOffset=180510439, size=16999488, lastModifiedTime=1715076009000, largestRecordTimestamp=Some(1715076008996)),LogSegment(baseOffset=180512848, size=16997845, lastModifiedTime=1715076014033, largestRecordTimestamp=Some(1715076014032)),LogSegment(baseOffset=180515281, size=16990661, lastModifiedTime=1715076019245, largestRecordTimestamp=Some(1715076019216)),LogSegment(baseOffset=180517815, size=16996244, lastModifiedTime=1715076023989, largestRecordTimestamp=Some(1715076023963)),LogSegment(baseOffset=180520112, size=16992012, lastModifiedTime=1715076029243, largestRecordTimestamp=Some(1715076029231)) (kafka.log.LocalLog$)`,
`[2024-05-07 10:55:53,038] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=447957, size=948, lastModifiedTime=1715059232052, largestRecordTimestamp=Some(1715059232002)),LogSegment(baseOffset=447969, size=948, lastModifiedTime=1715059424352, largestRecordTimestamp=Some(1715059424301)) (kafka.log.LocalLog$)`,
- `[2024-05-07 10:55:<_> INFO Deleted log <_> (kafka.log.LogSegment)`,
- `[2024-05-07 10:55:<_> INFO Deleted offset index <_> (kafka.log.LogSegment)`,
- `[2024-05-07 10:55:<_> INFO Deleted producer state snapshot <_> (kafka.log.SnapshotFile)`,
- `[2024-05-07 10:55:<_> INFO Deleted time index <_> (kafka.log.LogSegment)`,
- `[2024-05-07 10:55:<_> INFO [ProducerStateManager <_> Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`,
- `[2024-05-07 <_> INFO [LocalLog partition=<_> dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=<_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_>
largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.LocalLog$)`,
- `[2024-05-07 <_> INFO [LocalLog partition=<_> dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. (kafka.log.LocalLog)`,
- `[2024-05-07 <_> INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=<_> size=948, lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.LocalLog$)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=<_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> due to retention size <_> breach.
Log size after deletion will be <_> (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach:LogSegment(baseOffset=<_> size=948, lastModifiedTime=<_> largestRecordTimestamp=<_> <_> size=948, lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach:LogSegment(baseOffset=<_> size=<_> lastModifiedTime=<_> largestRecordTimestamp=<_> (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to leader offset increment (kafka.log.UnifiedLog)`,
- `[2024-05-07 <_> INFO [UnifiedLog partition=<_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to segment deletion (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:53,<_>] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=<_>, size=948, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) (kafka.log.LocalLog$)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted log /bitnami/kafka/data/<_>/<_>.log.deleted. (kafka.log.LogSegment)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted offset index /bitnami/kafka/data/<_>/<_>.index.deleted. (kafka.log.LogSegment)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted producer state snapshot /bitnami/kafka/data/<_>/<_>.snapshot.deleted (kafka.log.SnapshotFile)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO Deleted time index /bitnami/kafka/data/<_>/<_>.timeindex.deleted. (kafka.log.LogSegment)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [LocalLog partition=<_>, dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. (kafka.log.LocalLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [ProducerStateManager partition=<_>] Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=<_>, size=<_>, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) due to retention size <_> breach. Log size after deletion will be <_>. (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: LogSegment(baseOffset=<_>, size=948, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)),LogSegment(baseOffset=<_>, size=948, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: LogSegment(baseOffset=<_>, size=<_>, lastModifiedTime=<_>, largestRecordTimestamp=Some(<_>)) (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Incremented log start offset to <_> due to leader offset increment (kafka.log.UnifiedLog)`,
+ `[2024-05-07 10:55:<_>,<_>] INFO [UnifiedLog partition=<_>, dir=/bitnami/kafka/data] Incremented log start offset to <_> due to segment deletion (kafka.log.UnifiedLog)`,
},
},
{
@@ -246,20 +221,22 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
inputFile: "testdata/kubernetes.txt",
patterns: []string{
`I0507 12:02:27.947830 1 nodeutilization.go:274] "Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers"`,
- `I0507 12:02:<_> 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" <_>`,
- `I0507 12:02:<_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:02:<_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`,
- `I0507 12:02:<_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="pod is a DaemonSet pod"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, insufficient <_> insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, insufficient <_> insufficient <_> insufficient pods]"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_> insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:<_> node:<_> error:="insufficient cpu"`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:<_> error:="[insufficient <_> insufficient <_>`,
- `I0507 12:02:<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:<_> error:="pod node selector does not match the node label"`,
- `I0507 12:02:<_> 1 node.go:339] "no Pod antiaffinity rule found" <_>`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" pod="<_>/<_>"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="<_>/<_>" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="ge-logs/<_>" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="insight-logs/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="loki-dev-ssd/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/<_>" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:02:27.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="pyroscope-ebpf/<_>" checks="pod is a DaemonSet pod"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, <_> <_><_> <_> <_><_> <_> <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="<_>/<_>" node:="<_>" error:="insufficient cpu"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="<_>" error:="[insufficient <_>, insufficient <_>]"`,
+ `I0507 12:02:27.<_> 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="<_>" error:="pod node selector does not match the node label"`,
+ `I0507 12:02:27.<_> 1 node.go:339] "no Pod antiaffinity rule found" pod="<_>/<_>"`,
`I0507 12:04:17.595169 1 descheduler.go:155] Building a pod evictor`,
`I0507 12:04:17.596431 1 nodeutilization.go:204] "Node is underutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" usage={"cpu":"984m","memory":"611Mi","pods":"16"} usagePercentage={"cpu":12.44,"memory":2.15,"pods":25}`,
`I0507 12:04:17.596484 1 highnodeutilization.go:107] "Criteria for a node below target utilization" CPU=50 Mem=50 Pods=100`,
@@ -267,26 +244,33 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
`I0507 12:04:17.596528 1 nodeutilization.go:260] "Total capacity to be moved" CPU=5060 Mem=112216292800 Pods=163`,
`I0507 12:04:17.596651 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/metrics-server-v0.6.3-68f5b7c4d5-t5mz8" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
`I0507 12:04:17.596803 1 defaultevictor.go:202] "Pod fails the following checks" pod="gadget/gadget-zjjts" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:04:<_> 1 nodeutilization.go:207] "Node is overutilized" <_> usage={"cpu":<_> <_> <_> usagePercentage={"cpu":<_> <_> <_>`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
- `I0507 12:<_> <_> 1 defaultevictor.go:202] "Pod fails the following checks" <_> checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
- `I0507 12:<_> <_> 1 descheduler.go:<_> "Number of evicted pods" <_>`,
- `I0507 12:<_> <_> 1 nodeutilization.go:<_> "Evicting pods from node" <_> usage={"cpu":<_> <_> <_>`,
- `I0507 12:<_> <_> 1 nodeutilization.go:<_> "No removable pods on node, try next node" <_>`,
- `I0507 12:<_> <_> 1 profile.go:<_> "Total number of pods evicted" extension point="Balance" <_>`,
- `I0507 12:<_> <_> 1 reflector.go:<_> k8s.io/client-go/informers/factory.go:<_> Watch close - <_> total <_> items received`,
- `I0507 <_> 1 <_> "Pods on node" node=<_> allPods=<_> nonRemovablePods=<_> removablePods=<_>`,
+ `I0507 12:04:17.<_> 1 nodeutilization.go:207] "Node is overutilized" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"} usagePercentage={"cpu":<_>.<_>,"memory":<_>.<_>,"pods":<_>.<_>}`,
+ `I0507 12:04:17.<_> 1 nodeutilization.go:207] "Node is overutilized" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"} usagePercentage={"cpu":<_>.<_>,"memory":<_>.<_>,"pods":<_>}`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="conntrack-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="goldpinger/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/<_>" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="node-exporter/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/<_>" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"`,
+ `I0507 12:<_>:<_>.<_> 1 descheduler.go:<_>] "Number of evicted pods" totalEvicted=<_>`,
+ `I0507 12:<_>:<_>.<_> 1 nodeutilization.go:<_>] "Evicting pods from node" node="<_>" usage={"cpu":"<_>","memory":"<_>","pods":"<_>"}`,
+ `I0507 12:<_>:<_>.<_> 1 nodeutilization.go:<_>] "No removable pods on node, try next node" node="<_>"`,
+ `I0507 12:<_>:<_>.<_> 1 nodeutilization.go:<_>] "Pods on node" node="<_>" allPods=<_> nonRemovablePods=<_> removablePods=<_>`,
+ `I0507 12:<_>:<_>.<_> 1 profile.go:<_>] "Total number of pods evicted" extension point="Balance" evictedPods=<_>`,
+ `I0507 12:<_>:<_>.<_> 1 reflector.go:<_>] k8s.io/client-go/informers/factory.go:<_>: Watch close - *v1.<_> total <_> items received`,
},
},
{
drain: New(DefaultConfig(), nil),
inputFile: "testdata/vault.txt",
patterns: []string{
- `2024-05-07T10:<_> <_> [INFO] expiration: revoked lease: <_>`,
+ `2024-05-07T10:56:38.667Z [INFO] expiration: revoked lease: lease_id=auth/gcp/login/h4c031a99aa555040a0dd99864d828e946c6d4e31f4f5178757183def61f9d104`,
+ `2024-05-07T10:<_>:<_>.<_> [INFO] expiration: revoked lease: lease_id=auth/kubernetes/<_>/login/<_>`,
},
},
{
@@ -294,86 +278,129 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) {
inputFile: "testdata/calico.txt",
patterns: []string{
`2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 699: Finished loading iptables state ipVersion=0x4 table="filter"`,
+ `2024-05-08 15:23:56.403 [INFO][615489] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 600ms: avg=119ms longest=119ms (resync-filter-v4)`,
`2024-05-08 15:23:56.614 [DEBUG][76] felix/int_dataplane.go 1777: Refreshing routes`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/route_rule.go 179: Queueing a resync of routing rules. ipVersion=4`,
- `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480:Queueing a resync of routing table. ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`,
- `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480:Queueing a resync of routing table. ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
+ `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. ifaceRegex="<_>.<_>" ipVersion=0x4 tableIndex=<_>`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 533: Check interfaces matching regex`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 605: Queueing a resync of wireguard configuration ipVersion=0x4`,
`2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 654: Wireguard is not in-sync - verifying wireguard configuration is removed ipVersion=0x4`,
`2024-05-08 15:23:56.617 [DEBUG][76] felix/wireguard.go 1503: Wireguard is disabled and does not exist ifaceName="wireguard.cali" ipVersion=0x4`,
`2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 584: Flag no OIF for full re-sync`,
- `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 614:Synchronised routes on interface ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
- `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 661:Syncing interface routes ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
- `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 686:Reconcile against kernel programming ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`,
- `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`,
- `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`,
- `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`,
-
`2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654:Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1`, + `2024-05-08 15:23:56.624 [INFO][76] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 200ms: avg=10ms longest=10ms (resync-routes-v4,resync-routes-v4,resync-rules-v4,resync-wg)`, + `2024-05-08 15:23:56.<_> [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="<_>" ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:56.<_> [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="<_>.<_>" ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:56.<_> [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-ksa.<_>.<_>" ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 880: Processing route: 254 <_> 10.68.10.<_>/32 ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:56.<_> [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.<_>/32 ifaceName="<_>" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`, + `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`, + `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected 
out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:58.169 [INFO][2333] felix/summary.go 100: Summarising 35 dataplane reconciliation loops over 1m2s: avg=12ms longest=46ms (resync-filter-v4,resync-filter-v6,resync-mangle-v4,resync-mangle-v6,update-filter-v4,update-filter-v6)`, `2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 957: Examining link for MTU calculation mtu=1500 name="eth0"`, `2024-05-08 15:23:58.680 [DEBUG][216945] felix/int_dataplane.go 1785: Reschedule kick received`, `2024-05-08 15:23:58.681 [DEBUG][216945] felix/feature_detect.go 112: Refreshing detected iptables features`, - `2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 944:Invalidating dataplane cache ipVersion=0x4 reason="refresh timer" table="nat"`, + `2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 944: Invalidating dataplane cache ipVersion=0x4 reason="refresh timer" table="nat"`, `2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 242: Ran iptables --version rawVersion="iptables v1.8.4 (legacy)\n"`, `2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 255: Parsed iptables version version=1.8.4`, `2024-05-08 15:23:58.684 [DEBUG][216945] felix/table.go 604: Loading current iptables state and checking it is correct. ipVersion=0x4 table="nat"`, `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 110: Raw kernel version rawVersion="Linux version 5.15.0-1057-azure (buildd@lcy02-amd64-033) (gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0, GNU ld (GNU Binutils for Ubuntu) 2.38) #65-Ubuntu SMP Fri Feb 9 18:39:24 UTC 2024\n"`, `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 118: Parsed kernel version version=5.15.0-1057`, `2024-05-08 15:23:58.715 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, - `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851:Parsing line ipVersion=0x4 line="*nat" table="nat"`, + `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="*nat" table="nat"`, `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, - `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881:Not an append, skipping ipVersion=0x4 line="*nat" table="nat"`, - `2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat"`, - `2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="POSTROUTING" ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat"`, - `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat"`, - `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat"`, - `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="OUTPUT" ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat"`, - `2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="PREROUTING" ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat"`, - `2024-05-08 
15:23:<_> <_> felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{}`, - `2024-05-08 15:23:<_> <_> felix/health.go 167: Health: <_>`, - `2024-05-08 15:23:<_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_> ext:<_> loc:(*time.Location)(0x4ce3aa0)}}`, - `2024-05-08 15:23:<_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_> ext:<_> loc:(*time.Location)(0x4ce3aa0)}}`, - `2024-05-08 15:23:<_> <_> felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_> ext:<_> loc:(*time.Location)(0x4ce3aa0)}}`, - `2024-05-08 15:23:<_> <_> felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"}`, - `2024-05-08 15:23:<_> <_> felix/health.go <_> GET <_>`, - `2024-05-08 15:23:<_> <_> felix/int_dataplane.go 1773: Refreshing IP sets state`, - `2024-05-08 15:23:<_> <_> felix/int_dataplane.go 1807: Applying dataplane updates`, - `2024-05-08 15:23:<_> <_> felix/int_dataplane.go 2080: Asked to reschedule. <_>`, - `2024-05-08 15:23:<_> <_> felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet"`, - `2024-05-08 15:23:<_> <_> felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet"`, - `2024-05-08 15:23:<_> <_> felix/ipsets.go 426: Parsing IP set. family="inet" <_>`, - `2024-05-08 15:23:<_> <_> felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" <_>`, - `2024-05-08 15:23:<_> <_> felix/ipsets.go 643: No dirty IP sets. family="inet"`, - `2024-05-08 15:23:<_> <_> felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_> <_> <_> <_>`, - `2024-05-08 15:23:<_> <_> felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, - `2024-05-08 15:23:<_> <_> felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, - `2024-05-08 15:23:<_> <_> felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_> <_> time.Local)}} type=""`, - `2024-05-08 15:23:<_> <_> felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. 
ipVersion=0x4 <_>`, - `2024-05-08 15:23:<_> <_> felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 <_>`, - `2024-05-08 15:23:<_> <_> felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`, - `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4`, - `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4`, - `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}}`, - `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{}`, - `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1798: Processing BPF actions. family="ipv4"`, - `2024-05-08 15:23:<_> <_> felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4"`, - `2024-05-08 15:23:<_> <_> felix/xdp_state.go 968: Processing member updates. family=4`, - `2024-05-08 15:23:<_> [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":<_> - [0:0]" table="nat"`, - `2024-05-08 15:23:<_> [DEBUG][216945] felix/table.go 870: Found forward-reference <_> ipVersion=0x4 line=":<_> - [0:0]" table="nat"`, - `2024-05-08 15:23:<_> [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection <_> <_>`, - `2024-05-08 <_> <_> felix/ipsets.go 366:Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=<_>`, - `2024-05-08 <_> <_> felix/ipsets.go 467:Found member in dataplane canon=<_> family="inet" member=<_> setID="this-host"`, - `2024-05-08 <_> <_> felix/ipsets.go 589:Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools"`, - `2024-05-08 <_> <_> felix/ipsets.go 589:Whitelisting IP sets. ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools"`, - `2024-05-08 <_> <_> felix/ipsets.go 589:Whitelisting IP sets. 
ID="this-host" family="inet" mainName="cali40this-host"`, - `2024-05-08 <_> [DEBUG][615489] felix/table.go 677:Skipping expected chain chainName=<_> ipVersion=0x4 table="filter"`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 557:Resync:found calico-owned interface ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 614:Synchronised routes on interface ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 661:Syncing interface routes ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 686:Reconcile against kernel programming ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 880:Processing route:254 <_> <_> ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `2024-05-08 <_> [DEBUG][76] felix/route_table.go 915:Route is correct dest=<_> ifaceName=<_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, - `bird: Netlink: No route to host`, + `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="*nat" table="nat"`, + `2024-05-08 15:23:58.<_> [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":<_> <_> [0:0]" table="nat"`, + `2024-05-08 15:23:58.<_> [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="<_>" ipVersion=0x4 line=":<_> <_> [0:0]" table="nat"`, + `2024-05-08 15:23:58.<_> [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=<_> name="<_>"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{}`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go 167: Health: <_>`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"<_>", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:<_>, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:<_>, ext:<_>, loc:(*time.Location)(0x4ce3aa0)}}`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"}`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/health.go <_>: GET /<_>`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/int_dataplane.go 1773: Refreshing IP sets state`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/int_dataplane.go 1807: Applying dataplane updates`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/int_dataplane.go 2080: Asked to reschedule. delay=<_>.<_>`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 314: Resyncing ipsets with dataplane. 
family="inet"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=<_>.<_>`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 426: Parsing IP set. family="inet" setName="<_>"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 467: Found member in dataplane canon=<_>.<_>.<_>.<_> family="inet" member="<_>.<_>.<_>.<_>" setID="this-host"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 589: Whitelisting IP sets. ID="<_>" family="inet" mainName="<_>"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="<_>"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/ipsets.go 643: No dirty IP sets. family="inet"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_>, <_>, time.Local)}} type=""`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="<_>"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="<_>"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}}`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{}`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4"`, + `2024-05-08 15:23:<_>.<_> [DEBUG][<_>] felix/xdp_state.go 968: Processing member updates. 
family=4`, + `2024-05-08 15:23:<_>.<_> [INFO][<_>] felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_>.<_>: avg=<_> longest=<_> (<_>)`, + "bird: Netlink: No route to host", + }, + }, + { + drain: New(DefaultConfig(), nil), + inputFile: "testdata/grafana-ruler.txt", + patterns: []string{ + `level=debug ts=2024-05-29T13:44:15.804597912Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"`, + `level=debug ts=2024-05-29T13:44:15.<_> caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"`, + `level=debug ts=2024-05-29T13:44:15.<_> caller=remote_instance_store.go:51 user=<_> slug=<_> msg="calling SaveAlertInstance"`, + `logger=ngalert.scheduler user=102553 slug=flownative version=1 fingerprint=4ad9e35be0f80ca3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.79499903Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.794695854s EvaluationString:}]" duration=116.038803ms`, + `logger=ngalert.scheduler user=473762 slug=intentiq version=35 fingerprint=0bc4b6f46a852420 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.788200731Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.787878355s EvaluationString:}]" duration=15.345212ms`, + `logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=65a68c433031b4e0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.790598463Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.78875161s EvaluationString:}]" duration=1.693079007s`, + `logger=ngalert.state.manager user=102553 slug=flownative instance= t=2024-05-29T13:44:15.795103234Z level=debug msg="Setting next state" handler=resultNormal`, + `logger=ngalert.state.manager user=15338 slug=rstsoftwarerc instance= t=2024-05-29T13:44:15.790951656Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z`, + `logger=ngalert.state.manager user=172772 slug=ppbtradingtribe instance="datasource_uid=p06gSxS7k, ref_id=A" t=2024-05-29T13:44:15.793080651Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:15.79304032Z level=debug msg="State manager processing evaluation results" resultCount=1`, + `logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.796750449Z level=debug msg="Setting next state" handler=resultNoData`, + `logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dish" t=2024-05-29T13:44:15.788780219Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed" t=2024-05-29T13:44:15.788904162Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn" t=2024-05-29T13:44:15.789011178Z level=debug msg="Setting next state" handler=resultNormal`, + `logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" 
t=2024-05-29T13:44:15.792756002Z level=debug msg="Setting next state" handler=resultNoData`, + `logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" t=2024-05-29T13:44:15.792775073Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799932951Z level=debug msg="Setting next state" handler=resultNormal`, + `logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799945019Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.<_> level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData`, + `logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.<_> level=debug msg="Setting next state" handler=resultNoData`, + `logger=ngalert.state.manager user=473762 slug=intentiq t=2024-05-29T13:44:15.788261794Z level=debug msg="State manager processing evaluation results" resultCount=1`, + `logger=ngalert.state.manager user=630397 slug=tatin instance= t=2024-05-29T13:44:15.795542988Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.800327814Z level=debug msg="Setting next state" handler=resultNoData`, + `logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791100679Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData`, + `logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791114955Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791129917Z level=debug msg="Setting next state" handler=resultNoData`, + `logger=ngalert.state.manager user=84535 slug=arweave instance= t=2024-05-29T13:44:15.796640981Z level=debug msg="Setting next state" handler=resultNormal`, + `logger=ngalert.state.manager user=84535 slug=arweave t=2024-05-29T13:44:15.796542294Z level=debug msg="State manager processing evaluation results" resultCount=1`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=<_>, instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=<_>, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Setting next state" handler=resultNormal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d" t=2024-05-29T13:44:15.78870732Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, 
instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c" t=2024-05-29T13:44:15.790564871Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791738618Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a" t=2024-05-29T13:44:15.79227249Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsdevauthts-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthts-utils-7f54f8d7b4-njddr, uid=352d7df2-7832-41f3-ad3e-cbe1a060c968" t=2024-05-29T13:44:15.793846886Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsqalivets-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivets-utils-75b748978f-r2vkj, uid=1d39d0d7-d483-427b-ba91-45d897674698" t=2024-05-29T13:44:15.794284465Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.<_>.<_>:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=<_>, uid=<_>" t=2024-05-29T13:44:15.<_> level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-web, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthts-web-57f5b6f56b-bdmh9, uid=8f6b5224-94ce-4f5d-ba08-03f9fc2f572f" t=2024-05-29T13:44:15.795397351Z level=debug msg="Keeping state" state=Normal`, + `logger=ngalert.state.manager.persist user=14927 slug=rstsoftware t=2024-05-29T13:44:15.798496844Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.340653ms`, + `logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:15.806655602Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1`, + `logger=ngalert.state.manager.persist user=<_> slug=<_> t=2024-05-29T13:44:15.<_> level=debug msg="Saving alert states" count=<_> 
max_state_save_concurrency=1`, }, }, } @@ -454,7 +481,6 @@ func TestDrain_TrainGeneratesMatchablePatterns(t *testing.T) { } }) } - } func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) { @@ -509,16 +535,17 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) }, }, { - name: "Unicode characters are matchable", + name: "Scheduler patterns are matchable", drain: New(DefaultConfig(), nil), inputLines: []string{ - `13:25:18.033470 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_999 0.00 1717075518`, - `13:25:18.033422 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_99 0.00 1717075518`, - `13:25:18.033394 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_95 0.00 1717075518`, - `13:25:18.033364 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_75 0.00 1717075518`, - `13:25:18.033335 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_50 0.00 1717075518`, - `13:25:18.033304 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_std 0.00 1717075518`, - `13:25:18.033281 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_gauge.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_mean 0.00 1717075518`, + `ts=2024-05-30T12:50:36.648377186Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, + `ts=2024-05-30T12:50:36.350575929Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, + `ts=2024-05-30T12:50:36.335784477Z 
caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, + `ts=2024-05-30T12:50:36.250406732Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, + `ts=2024-05-30T12:50:36.248030329Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.45.239:9095`, + `ts=2024-05-30T12:50:36.176344754Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, + `ts=2024-05-30T12:50:36.174730772Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, + `ts=2024-05-30T12:50:36.076517207Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.45.239:9095`, }, }, } @@ -541,5 +568,60 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) } }) } +} + +func TestDeduplicatePlaceholders(t *testing.T) { + type dedupCase struct { + line string + want string + } + cases := []dedupCase{ + { + line: "abcd", + want: "abcd", + }, + { + line: "<_><_>abcd", + want: "<_>abcd", + }, + { + line: strings.Repeat("<_>", 100), + want: "<_>", + }, + { + line: "<_> <_>", + want: "<_> <_>", + }, + { + line: strings.Repeat("<_> ", 100), + want: strings.Repeat("<_> ", 100), + }, + { + line: "<_><<_>", + want: "<_><<_>", + }, + { + line: "<_><->", + want: "<_><->", + }, + { + line: strings.Repeat(strings.Repeat("<_>", 100)+" ", 100), + want: strings.Repeat("<_> ", 100), + }, + { + line: "<<<<<<<_><_>>>>>>>>", + want: "<<<<<<<_>>>>>>>>", + }, + { + line: strings.Repeat("A", 100) + "<_><_>", + want: strings.Repeat("A", 100) + "<_>", + }, + } + for i, tc := range cases { + t.Run(fmt.Sprintf("Dedup %d", i), func(t *testing.T) { + got := deduplicatePlaceholders(tc.line, `<_>`) + require.Equal(t, tc.want, got) + }) + } } diff --git a/pkg/pattern/drain/line_tokenizer.go b/pkg/pattern/drain/line_tokenizer.go index 1317fbe3fca88..89bf34a5569b5 100644 --- a/pkg/pattern/drain/line_tokenizer.go +++ b/pkg/pattern/drain/line_tokenizer.go @@ -1,25 +1,96 @@ package drain -import "strings" +import ( + "strings" + "unicode" + "unicode/utf8" +) type LineTokenizer interface { - Tokenize(line string) []string - Join(tokens []string) string + Tokenize(line string) ([]string, interface{}) + Join(tokens []string, state interface{}) string } type spacesTokenizer struct{} -func (spacesTokenizer) Tokenize(line string) []string { - return strings.Split(line, " ") +func (spacesTokenizer) Tokenize(line string) ([]string, interface{}) { + return strings.Split(line, " "), nil } -func (spacesTokenizer) Join(tokens []string) string { +func (spacesTokenizer) Join(tokens []string, _ interface{}) string { return strings.Join(tokens, " ") } +type punctuationTokenizer struct { + includeDelimiters [128]rune + excludeDelimiters [128]rune +} +
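+// newPunctuationTokenizer returns a tokenizer that splits a line on spaces and on punctuation runes. '=' is treated as an extra delimiter even though Unicode does not class it as punctuation, while '_' and '-' are excluded so identifiers such as "carbon-relay-ng" survive as single tokens.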
+func newPunctuationTokenizer() *punctuationTokenizer { + var included [128]rune + var excluded [128]rune + included['='] = 1 + excluded['_'] = 1 + excluded['-'] = 1 + return &punctuationTokenizer{ + includeDelimiters: included, + excludeDelimiters: excluded, + } +} + +func (p *punctuationTokenizer) Tokenize(line string) ([]string, interface{}) { + tokens := make([]string, len(line)) // Worst case: every character is punctuation. + spacesAfter := make([]int, strings.Count(line, " ")) // Could be a bitmap, but it's not worth it for a few bytes. + + start := 0 + nextTokenIdx := 0 + nextSpaceIdx := 0 + for i, char := range line { + if unicode.IsLetter(char) || unicode.IsNumber(char) || char < 128 && p.excludeDelimiters[char] != 0 { + continue + } + included := char < 128 && p.includeDelimiters[char] != 0 + if char == ' ' || included || unicode.IsPunct(char) { + if i > start { + tokens[nextTokenIdx] = line[start:i] + nextTokenIdx++ + } + width := utf8.RuneLen(char) // Advance by the full rune width so multi-byte punctuation is not split mid-rune. + if char == ' ' { + spacesAfter[nextSpaceIdx] = nextTokenIdx - 1 + nextSpaceIdx++ + } else { + tokens[nextTokenIdx] = line[i : i+width] + nextTokenIdx++ + } + start = i + width + } + } + + if start < len(line) { + tokens[nextTokenIdx] = line[start:] + nextTokenIdx++ + } + + return tokens[:nextTokenIdx], spacesAfter[:nextSpaceIdx] +} + +func (p *punctuationTokenizer) Join(tokens []string, state interface{}) string { + spacesAfter := state.([]int) + strBuilder := strings.Builder{} + spacesIdx := 0 + for i, token := range tokens { + strBuilder.WriteString(token) + for spacesIdx < len(spacesAfter) && i == spacesAfter[spacesIdx] { + // One entry for each space following the token + strBuilder.WriteRune(' ') + spacesIdx++ + } + } + return strBuilder.String() +} + type splittingTokenizer struct{} -func (splittingTokenizer) Tokenize(line string) []string { +func (splittingTokenizer) Tokenize(line string) ([]string, interface{}) { numEquals := strings.Count(line, "=") numColons := strings.Count(line, ":") numSpaces := strings.Count(line, " ") @@ -32,24 +103,31 @@ func (splittingTokenizer) Tokenize(line string) []string { } tokens := make([]string, 0, expectedTokens) + spacesAfter := make([]int, 0, strings.Count(line, " ")) for _, token := range strings.SplitAfter(line, keyvalSeparator) { - tokens = append(tokens, strings.Split(token, " ")...)
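+ // Remember which token indexes are followed by a space so Join can reinsert the spaces exactly.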
+ words := strings.Split(token, " ") + for i, entry := range words { + tokens = append(tokens, entry) + if i == len(words)-1 { + continue + } + spacesAfter = append(spacesAfter, len(tokens)-1) + } } - return tokens + return tokens, spacesAfter } -func (splittingTokenizer) Join(tokens []string) string { - var builder strings.Builder - for _, token := range tokens { - if strings.HasSuffix(token, "=") || strings.HasSuffix(token, ":") { - builder.WriteString(token) - } else { - builder.WriteString(token + " ") +func (splittingTokenizer) Join(tokens []string, state interface{}) string { + spacesAfter := state.([]int) + strBuilder := strings.Builder{} + spacesIdx := 0 + for i, token := range tokens { + strBuilder.WriteString(token) + for spacesIdx < len(spacesAfter) && i == spacesAfter[spacesIdx] { + // One entry for each space following the token + strBuilder.WriteRune(' ') + spacesIdx++ } } - output := builder.String() - if output[len(output)-1] == ' ' { - return output[:len(output)-1] - } - return output + return strBuilder.String() } diff --git a/pkg/pattern/drain/line_tokenizer_test.go b/pkg/pattern/drain/line_tokenizer_test.go index 8cb541a61b629..1eda1b51068a3 100644 --- a/pkg/pattern/drain/line_tokenizer_test.go +++ b/pkg/pattern/drain/line_tokenizer_test.go @@ -1,59 +1,163 @@ package drain import ( - "reflect" "testing" + + "github.com/stretchr/testify/require" ) -func TestSplittingTokenizer_Tokenize(t *testing.T) { - tokenizer := splittingTokenizer{} +type TestCase struct { + name string + line string + want map[string][]string +} - tests := []struct { - name string - line string - want []string - }{ - { - name: "Test with equals sign", - line: "key1=value1 key2=value2", - want: []string{"key1=", "value1", "key2=", "value2"}, +const typePunctuation = "punctuation" +const typeSplitting = "splitting" + +var testCases = []TestCase{ + { + name: "Test with equals sign", + line: "key1=value1 key2=value2", + want: map[string][]string{ + typePunctuation: {"key1", "=", "value1", "key2", "=", "value2"}, + typeSplitting: {"key1=", "value1", "key2=", "value2"}, }, - { - name: "Test with colon", - line: "key1:value1 key2:value2", - want: []string{"key1:", "value1", "key2:", "value2"}, + }, + { + name: "Test with colon", + line: "key1:value1 key2:value2", + want: map[string][]string{ + typePunctuation: {"key1", ":", "value1", "key2", ":", "value2"}, + typeSplitting: {"key1:", "value1", "key2:", "value2"}, }, - { - name: "Test with mixed delimiters, more = than :", - line: "key1=value1 key2:value2 key3=value3", - want: []string{"key1=", "value1", "key2:value2", "key3=", "value3"}, + }, + { + name: "Test with mixed delimiters, more = than :", + line: "key1=value1 key2:value2 key3=value3", + want: map[string][]string{ + typePunctuation: {"key1", "=", "value1", "key2", ":", "value2", "key3", "=", "value3"}, + typeSplitting: {"key1=", "value1", "key2:value2", "key3=", "value3"}, }, + }, + { + name: "Test with mixed delimiters, more : than =", + line: "key1:value1 key2:value2 key3=value3", + want: map[string][]string{ + typePunctuation: {"key1", ":", "value1", "key2", ":", "value2", "key3", "=", "value3"}, + typeSplitting: {"key1:", "value1", "key2:", "value2", "key3=value3"}, + }, + }, + { + name: "Dense json", + line: `{"key1":"value1","key2":"value2","key3":"value3"}`, + want: map[string][]string{ + typePunctuation: {`{`, `"`, `key1`, `"`, `:`, `"`, `value1`, `"`, `,`, `"`, `key2`, `"`, `:`, `"`, `value2`, `"`, `,`, `"`, `key3`, `"`, `:`, `"`, `value3`, `"`, `}`}, + typeSplitting: {`{"key1":`, 
`"value1","key2":`, `"value2","key3":`, `"value3"}`}, + }, + }, + { + name: "json with spaces", + line: `{"key1":"value1", "key2":"value2", "key3":"value3"}`, + want: map[string][]string{ + typePunctuation: {`{`, `"`, `key1`, `"`, `:`, `"`, `value1`, `"`, `,`, `"`, `key2`, `"`, `:`, `"`, `value2`, `"`, `,`, `"`, `key3`, `"`, `:`, `"`, `value3`, `"`, `}`}, + typeSplitting: {`{"key1":`, `"value1",`, `"key2":`, `"value2",`, `"key3":`, `"value3"}`}, + }, + }, + { + name: "logfmt multiword values", + line: `key1=value1 key2=value2 msg="this is a message"`, + want: map[string][]string{ + typePunctuation: {"key1", "=", "value1", "key2", "=", "value2", "msg", "=", `"`, `this`, "is", "a", `message`, `"`}, + typeSplitting: {"key1=", "value1", "key2=", "value2", "msg=", `"this`, "is", "a", `message"`}, + }, + }, + { + name: "longer line", + line: "09:17:38.033366 ▶ INFO route ops sending to dest https://graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics: service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full 0 1717060658", + want: map[string][]string{ + typePunctuation: {`09`, `:`, `17`, `:`, `38`, `.`, `033366`, `▶`, `INFO`, `route`, `ops`, `sending`, `to`, `dest`, `https`, `:`, `/`, `/`, `graphite-cortex-ops-blocks-us-east4`, `.`, `grafana`, `.`, `net`, `/`, `graphite`, `/`, `metrics`, `:`, `service_is_carbon-relay-ng`, `.`, `instance_is_carbon-relay-ng-c665b7b-j2trk`, `.`, `mtype_is_counter`, `.`, `dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics`, `.`, `unit_is_Metric`, `.`, `action_is_drop`, `.`, `reason_is_queue_full`, `0`, `1717060658`}, + typeSplitting: {`09:`, `17:`, `38.033366`, `▶`, `INFO`, ``, `route`, `ops`, `sending`, `to`, `dest`, `https:`, `//graphite-cortex-ops-blocks-us-east4.grafana.net/graphite/metrics:`, ``, `service_is_carbon-relay-ng.instance_is_carbon-relay-ng-c665b7b-j2trk.mtype_is_counter.dest_is_https_graphite-cortex-ops-blocks-us-east4_grafana_netgraphitemetrics.unit_is_Metric.action_is_drop.reason_is_queue_full`, `0`, `1717060658`}, + }, + }, + { + name: "Consecutive splits points: equals followed by space", + line: `ts=2024-05-30T12:50:36.648377186Z caller=scheduler_processor.go:143 level=warn msg="error contacting scheduler" err="rpc error: code = Unavailable desc = connection error: desc = \"error reading server preface: EOF\"" addr=10.0.151.101:9095`, + want: map[string][]string{ + typePunctuation: {`ts`, `=`, `2024-05-30T12`, `:`, `50`, `:`, `36`, `.`, `648377186Z`, `caller`, `=`, `scheduler_processor`, `.`, `go`, `:`, `143`, `level`, `=`, `warn`, `msg`, `=`, `"`, `error`, `contacting`, `scheduler`, `"`, `err`, `=`, `"`, `rpc`, `error`, `:`, `code`, `=`, `Unavailable`, `desc`, `=`, `connection`, `error`, `:`, `desc`, `=`, `\`, `"`, `error`, `reading`, `server`, `preface`, `:`, `EOF`, `\`, `"`, `"`, `addr`, `=`, `10`, `.`, `0`, `.`, `151`, `.`, `101`, `:`, `9095`}, + typeSplitting: {"ts=", "2024-05-30T12:50:36.648377186Z", "caller=", "scheduler_processor.go:143", "level=", "warn", "msg=", "\"error", "contacting", "scheduler\"", "err=", "\"rpc", "error:", "code", "=", ``, "Unavailable", "desc", "=", ``, "connection", "error:", "desc", "=", ``, `\"error`, "reading", "server", "preface:", `EOF\""`, "addr=", "10.0.151.101:9095"}, + }, + }, + { + name: "Only punctation", + line: `!@£$%^&*()`, + want: map[string][]string{ + typePunctuation: {`!`, `@`, `£$`, `%`, `^`, `&`, `*`, 
`(`, `)`}, + typeSplitting: {`!@£$%^&*()`}, + }, + }, +} + +func TestTokenizer_Tokenize(t *testing.T) { + tests := []struct { + name string + tokenizer LineTokenizer + }{ { - name: "Test with mixed delimiters, more : than =", - line: "key1:value1 key2:value2 key3=value3", - want: []string{"key1:", "value1", "key2:", "value2", "key3=value3"}, + name: typePunctuation, + tokenizer: newPunctuationTokenizer(), }, { - name: "Dense json", - line: `{"key1":"value1","key2":"value2","key3":"value3"}`, - want: []string{`{"key1":`, `"value1","key2":`, `"value2","key3":`, `"value3"}`}, + name: typeSplitting, + tokenizer: splittingTokenizer{}, }, + } + + for _, tt := range tests { + for _, tc := range testCases { + t.Run(tt.name+":"+tc.name, func(t *testing.T) { + got, _ := tt.tokenizer.Tokenize(tc.line) + require.Equal(t, tc.want[tt.name], got) + }) + } + } +} + +func TestTokenizer_TokenizeAndJoin(t *testing.T) { + tests := []struct { + name string + tokenizer LineTokenizer + }{ { - name: "json with spaces", - line: `{"key1":"value1", "key2":"value2", "key3":"value3"}`, - want: []string{`{"key1":`, `"value1",`, `"key2":`, `"value2",`, `"key3":`, `"value3"}`}, + name: typePunctuation, + tokenizer: newPunctuationTokenizer(), }, { - name: "logfmt multiword values", - line: `key1=value1 key2=value2 msg="this is a message"`, - want: []string{"key1=", "value1", "key2=", "value2", "msg=", `"this`, "is", "a", `message"`}, + name: typeSplitting, + tokenizer: splittingTokenizer{}, }, } for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tokenizer.Tokenize(tt.line); !reflect.DeepEqual(got, tt.want) { - t.Errorf("splittingTokenizer.Tokenize() = %v, want %v", got, tt.want) + for _, tc := range testCases { + t.Run(tt.name+":"+tc.name, func(t *testing.T) { + // Tokenize's (tokens, state) return values feed Join's two parameters directly. + got := tt.tokenizer.Join(tt.tokenizer.Tokenize(tc.line)) + require.Equal(t, tc.line, got) + }) + } + } +} + +func BenchmarkPunctuationTokenizer(b *testing.B) { + tokenizer := newPunctuationTokenizer() + + for _, tt := range testCases { + tc := tt + b.Run(tc.name, func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + tokenizer.Tokenize(tc.line) } }) } diff --git a/pkg/pattern/drain/log_cluster.go b/pkg/pattern/drain/log_cluster.go index af5932d16f706..cffff3abe5215 100644 --- a/pkg/pattern/drain/log_cluster.go +++ b/pkg/pattern/drain/log_cluster.go @@ -11,16 +11,18 @@ import ( ) type LogCluster struct { - id int - Size int - Tokens []string - Stringer func([]string) string - Chunks Chunks + id int + Size int + Tokens []string + // TokenState carries tokenizer-specific state (for example, space positions) that Stringer needs to rejoin Tokens into the original line layout. + TokenState interface{} + Stringer func([]string, interface{}) string + + Chunks Chunks } func (c *LogCluster) String() string { if c.Stringer != nil { - return c.Stringer(c.Tokens) + return c.Stringer(c.Tokens, c.TokenState) } return strings.Join(c.Tokens, " ") } diff --git a/pkg/pattern/drain/testdata/grafana-ruler.txt b/pkg/pattern/drain/testdata/grafana-ruler.txt new file mode 100644 index 0000000000000..54b6854d9e172 --- /dev/null +++ b/pkg/pattern/drain/testdata/grafana-ruler.txt @@ -0,0 +1,50000 @@ +logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:15.806655602Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.805113753Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=103548 slug=gen2 t=2024-05-29T13:44:15.805016017Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 
+level=debug ts=2024-05-29T13:44:15.804597912Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.802571162Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.801740193Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.800327814Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799945019Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.799932951Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:15.799982989Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:15.798839218Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=14927 slug=rstsoftware t=2024-05-29T13:44:15.798496844Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.340653ms +level=debug ts=2024-05-29T13:44:15.797668756Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.797275166Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.796750449Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=84535 slug=arweave instance= t=2024-05-29T13:44:15.796640981Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=84535 slug=arweave t=2024-05-29T13:44:15.796542294Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=wcs9-tds-devus-jenkins-w, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-jenkins-w-6c6cb984d8-qrpm7, uid=d229ff35-bf4d-4bb5-8791-60b0a3bebca8" t=2024-05-29T13:44:15.796130498Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=vault, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-vault-cons-85f6c4f87d-tpdmj, uid=2aa0ac70-24e0-4323-a7c7-61fead7b0c65" t=2024-05-29T13:44:15.796062736Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=vault, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d" t=2024-05-29T13:44:15.795990925Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.795593051Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling 
SaveAlertInstance" +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-web, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-web-6fc5b6f9c5-6spps, uid=b75b2425-e66c-4869-94f7-cfecc5d4c935" t=2024-05-29T13:44:15.795680228Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=630397 slug=tatin instance= t=2024-05-29T13:44:15.795542988Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-web, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthts-web-57f5b6f56b-bdmh9, uid=8f6b5224-94ce-4f5d-ba08-03f9fc2f572f" t=2024-05-29T13:44:15.795397351Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=102553 slug=flownative instance= t=2024-05-29T13:44:15.795103234Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=102553 slug=flownative version=1 fingerprint=4ad9e35be0f80ca3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.79499903Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.794695854s EvaluationString:}]" duration=116.038803ms +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthts-app-989d79dbb-lwc9p, uid=a6cfb6f8-edfe-4c28-8435-acb6d54f3599" t=2024-05-29T13:44:15.795068084Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-c59n4, uid=4d533dcf-4e6c-4ffe-a0fc-caa6e617c8c8" t=2024-05-29T13:44:15.794992842Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-4nw98, uid=855af10e-bb32-49c1-8a47-0fba814e437c" t=2024-05-29T13:44:15.794979122Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthts-app-989d79dbb-lwc9p, uid=a6cfb6f8-edfe-4c28-8435-acb6d54f3599" t=2024-05-29T13:44:15.794753977Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=ts-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-4nw98, uid=855af10e-bb32-49c1-8a47-0fba814e437c" t=2024-05-29T13:44:15.794631294Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsqausauthts-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthts-utils-59f788556b-xrfpx, 
uid=d195032e-df70-4672-bc90-79692b1411af" t=2024-05-29T13:44:15.794322337Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsqalivets-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivets-utils-75b748978f-r2vkj, uid=1d39d0d7-d483-427b-ba91-45d897674698" t=2024-05-29T13:44:15.794284465Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsdevauthts-utils, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthts-utils-7f54f8d7b4-njddr, uid=352d7df2-7832-41f3-ad3e-cbe1a060c968" t=2024-05-29T13:44:15.793876757Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=tdsdevauthts-utils, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthts-utils-7f54f8d7b4-njddr, uid=352d7df2-7832-41f3-ad3e-cbe1a060c968" t=2024-05-29T13:44:15.793846886Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf" t=2024-05-29T13:44:15.793416796Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60" t=2024-05-29T13:44:15.793216421Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=172772 slug=ppbtradingtribe instance="datasource_uid=p06gSxS7k, ref_id=A" t=2024-05-29T13:44:15.793080651Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:15.79304032Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833" t=2024-05-29T13:44:15.792980836Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227" t=2024-05-29T13:44:15.792956616Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833" t=2024-05-29T13:44:15.792793782Z level=debug msg="Keeping state" 
state=Normal +logger=ngalert.state.manager.persist user=412141 slug=sharethrough t=2024-05-29T13:44:15.79278731Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" t=2024-05-29T13:44:15.792775073Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="datasource_uid=pFBylkiVz, ref_id=Swap Usage for Alert" t=2024-05-29T13:44:15.792756002Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=search-app-master, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4, uid=c4f14b2b-581a-4543-a848-af6e25ada58a" t=2024-05-29T13:44:15.79227249Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=postgres, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7" t=2024-05-29T13:44:15.791954212Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=postgres, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d" t=2024-05-29T13:44:15.791863631Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=node, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791738618Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=mesh, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx, uid=9211d299-849a-4c56-95be-633b10fffe3c" t=2024-05-29T13:44:15.791660547Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=kaniko1, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791526073Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.791206493Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=jnlp, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.791456811Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed" t=2024-05-29T13:44:15.79134478Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.791225391Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791129917Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791114955Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=B" t=2024-05-29T13:44:15.791100679Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.791027617Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=15338 slug=rstsoftwarerc instance= t=2024-05-29T13:44:15.790951656Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7" t=2024-05-29T13:44:15.791010011Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=65a68c433031b4e0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.790598463Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.78875161s EvaluationString:}]" duration=1.693079007s +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49" t=2024-05-29T13:44:15.790593572Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c" t=2024-05-29T13:44:15.790564871Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52" t=2024-05-29T13:44:15.790229164Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-zjtfs, 
uid=81ec285c-745f-47c5-9ae8-771f4a5ba74c" t=2024-05-29T13:44:15.790085591Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn, uid=9c835179-b911-4296-a481-705af4228a18" t=2024-05-29T13:44:15.79004016Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced" t=2024-05-29T13:44:15.789860646Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqausauthcrs-app-cc98b9c59-742jp, uid=cfbfe1d6-c33f-4618-8876-ffec0b52cb52" t=2024-05-29T13:44:15.78960996Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg, uid=f20540a5-8b7c-48c2-96a2-a264404f0afa" t=2024-05-29T13:44:15.789407005Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=crs-app, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk, uid=451a3b1f-50fa-4418-bb67-e273772ceced" t=2024-05-29T13:44:15.789216261Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:15.789039986Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 +logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn" t=2024-05-29T13:44:15.789011178Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed" t=2024-05-29T13:44:15.788904162Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788771442Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788761161Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788725479Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dish" t=2024-05-29T13:44:15.788780219Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788701028Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788691799Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.78866505Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788646347Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788639897Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.7886198Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d" t=2024-05-29T13:44:15.78870732Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.7885482Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.78854173Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788522663Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788502704Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788472468Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788464205Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.78841334Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788374794Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788330559Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.788320822Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:15.788310995Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=473762 slug=intentiq t=2024-05-29T13:44:15.788261794Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=473762 slug=intentiq version=35 fingerprint=0bc4b6f46a852420 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.788200731Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.787878355s EvaluationString:}]" duration=15.345212ms +logger=ngalert.scheduler user=893151 slug=cmtdsnp version=1 fingerprint=0db5016ab8b43d15 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.781149137Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a7a0} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a800} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764303759s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a910} B:{Var:B Labels:cluster=tds-np-cluster, 
container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a958} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761 Value:0xc03af9a9c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76433916s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-application-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-application-controller-0, uid=ed798931-5824-4e7d-9f54-3225a6307761} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9aa70} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9ae60} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9aeb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764355151s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, 
pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9afa8} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9b008} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2 Value:0xc03af9b070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764373772s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-applicationset-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh, uid=805c5578-2751-48e3-8be3-baadb00840c2} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc03af9b118} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc03af9b178} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc03af9b1e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.764387162s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc030f5c0d0} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc030f5c160} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e Value:0xc030f5c1c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764408623s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-notifications-controller, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct, uid=5089875c-5641-46ab-b20e-ce2aa25c7f2e} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, 
pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c3e8} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c4f8} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764423283s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c680} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c6e8} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a Value:0xc030f5c5d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764439703s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-repo-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-repo-server-665d6b7b59-m5929, uid=c3b49347-3f87-4aad-842c-77225cad682a} value=0 ]} {Instance:cluster=tds-np-cluster, 
container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c7d0} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c840} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c8a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764453224s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ]} {Instance:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c9f8} B:{Var:B Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5ca80} C:{Var:C Labels:cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026 Value:0xc030f5c968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764468124s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ], [ 
var='C' labels={cluster=tds-np-cluster, container=argocd-server, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=argocd-server-5986f74c99-p8nsr, uid=dbf73d6f-0e51-458d-8d72-a63982e78026} value=0 ]} {Instance:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cba0} B:{Var:B Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cc08} C:{Var:C Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cb40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764487155s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ]} {Instance:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5ccd8} B:{Var:B Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cd40} C:{Var:C Labels:cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50 Value:0xc030f5cda8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764503805s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=cdap-sandbox, 
instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=cdap-sandbox, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62, uid=6b9f044a-473a-4c6d-8934-3647088e4e50} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5cf88} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5ce50} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd Value:0xc030f5cf38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764517896s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-vault-consu-76f5467596-mm9qn, uid=58f8624a-e2fd-4f88-bc15-5d8e8b90febd} value=0 ]} {Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5d158} B:{Var:B Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879, uid=f5320297-1117-400f-9704-d4f43fa1127d Value:0xc030f5d1b0} C:{Var:C Labels:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, 
[Grafana alert-rule state-history dump, truncated for readability. The original text repeats the same record structure for every remaining pod in cluster=tds-np-cluster: namespaces tds and tds-devops; containers consul, crs-app, dex, and frontend; scrape instances 172.30.43.160:8080 and 172.30.58.138:8080; pods including the wcs9-tds-*-vault-* consul pods, the tds*crs-app-* pods, argocd-dex-server-6c87968c75-qqvdk, and the tdsdevcaauth-exo-frontend-* pods. Every record reports State:Normal with an empty Error and Results:map[], variables A, B, and C all equal to 0, EvaluatedAt:2024-05-29 13:44:10 +0000 UTC, and an EvaluationDuration of roughly 5.76s; records differ only in pod, uid, instance, container, and in-memory pointer values. One representative record, with the pointer values elided:

{Instance:cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:... Value:...} B:{Var:B Labels:... Value:...} C:{Var:C Labels:... Value:...}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.764572477s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=consul, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-vault-consul-86db767f5-ttpm8, uid=546be242-3313-46c4-b57c-8a87f4e320ce} value=0 ], [ var='B' labels={...} value=0 ], [ var='C' labels={...} value=0 ]}]
container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d3888a8} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d388908} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d388960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765158244s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d388c28} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d388a68} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d388b78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765171556s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ]} {Instance:cluster=tds-np-cluster, 
container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d388cd0} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d388d30} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d388d88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765184626s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d388e88} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d388ee0} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d388e38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765197737s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ 
var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389038} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389098} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d3890f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765212627s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d3892f8} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d389240} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1 Value:0xc03d3892a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765227147s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, 
uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj, uid=86d8728e-14ab-409b-adc8-4d0d8c89f0d1} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d3893f0} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d389458} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea Value:0xc03d3894b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765246268s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf, uid=0bc92410-f1bd-4cb0-951e-533bae3780ea} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d389600} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d389660} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c Value:0xc03d389720}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765262588s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4, uid=2b8456c8-297f-4763-8f00-f8076b542d7c} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d3898f8} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d389950} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49 Value:0xc03d389898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765575208s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl, uid=15c097da-a56b-4fbd-a66d-477c24638f49} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d389a40} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, 
uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d389ac0} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec Value:0xc03d389b28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765607309s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevcaauth-exo-frontend-5d5766c56b-t88vn, uid=e9432221-3850-408e-be4a-37c1f06cceec} value=0 ]} {Instance:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389cb0} B:{Var:B Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389d00} C:{Var:C Labels:cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14 Value:0xc03d389c58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765621829s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=frontend, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevusauth-exo-frontend-749899bd65-rncb9, uid=735d03dc-29e0-4a03-9058-371077b57f14} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c000} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc03d389ea0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc03d389f58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.765635919s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c120} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c180} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c0c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76564905s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, 
uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c278} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c2d0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76566618s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17c378} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17c3d0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17c420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76599224s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, 
uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17c510} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17c570} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17c4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76601021s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c618} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c668} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b Value:0xc01b17c6c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766028111s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} 
value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m, uid=f44c20bf-791d-4c57-8e6d-81fdeaedec5b} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c808} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c760} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7 Value:0xc01b17c7b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766043021s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9, uid=95295c01-4f77-4706-8fa1-6e894b1447b7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c900} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c958} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7 Value:0xc01b17c8b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766056622s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='B' 
labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg, uid=2b9b17f9-fbac-48bd-988f-31c6b76810d7} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17caa8} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17ca00} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d Value:0xc01b17ca50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766070182s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qa-git-gitea-654cd6bb87-h7jkc, uid=57d7a792-6fe8-429e-a37d-737acd090f4d} value=0 ]} {Instance:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17cb50} B:{Var:B Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17cba0} C:{Var:C Labels:cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2 Value:0xc01b17cbf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766087283s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, 
container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=gitea, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=wcs9-tds-qaus-git-gitea-75dc8cd659-k86f4, uid=273f1ee9-4e21-4771-92ec-afd2b1721bb2} value=0 ]} {Instance:cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17ccc0} B:{Var:B Labels:cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cd28} C:{Var:C Labels:cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cd90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766100093s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=helm, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cf60} B:{Var:B Labels:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17ce48} C:{Var:C Labels:cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, 
pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed Value:0xc01b17cef8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766115693s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=helm, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7, uid=ca99b6a7-f08f-475a-adf6-dcf8c8936eed} value=0 ]} {Instance:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d100} B:{Var:B Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d030} C:{Var:C Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766129774s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=jenkins, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0} value=0 ]} {Instance:cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds-devops, pod=jenkins-deployment-5469b67d9b-64hgc, uid=9d3785a5-0b4d-4756-814c-17c117a142f0 Value:0xc01b17d228} B:{Var:B Labels:cluster=tds-np-cluster, container=jenkins, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, 
[Grafana alert rule evaluation log excerpt truncated: repeated State:Normal entries (vars A, B, and C, all value=0) for containers in the tds and tds-devops namespaces of cluster tds-np-cluster, scraped from job integrations/kubernetes/kube-state-metrics at instances 172.30.43.160:8080 and 172.30.58.138:8080, all evaluated at 2024-05-29 13:44:10 UTC with evaluation durations of roughly 5.77s.]
namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754270} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a7541d0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76669259s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754310} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754368} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a7543b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766705791s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754458} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a7544a8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a7544f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766721281s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc02a754600} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc02a754650} C:{Var:C 
Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0 Value:0xc02a7545b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766735071s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-repeater-7f6d99ddf6-6m9wf, uid=b2e77d0f-e52b-4908-8b35-9fe3de9075a0} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a754768} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a7547c0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd Value:0xc02a754710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766748572s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-repeater-5596664d54-8pzh6, uid=f3687f83-6af2-4e37-b69d-9b564a2739fd} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, 
container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754870} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a7548e8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679 Value:0xc02a754940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766771933s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64, uid=e687930a-1c4b-43fd-97c3-4be17e79a679} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a7549e0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754a38} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227 Value:0xc02a754aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766785663s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, 
instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws, uid=1f513acf-ba36-4abb-a435-ca6d5400b227} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754b50} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754ba8} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833 Value:0xc02a754c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766821454s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-repeater, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx, uid=3452a789-78d7-4e95-b885-4e862d380833} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a754cd8} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a754d40} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a754db8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766832464s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a754eb0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a754f10} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a754f78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766844145s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755090} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a7550f8} C:{Var:C 
Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766856295s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a7551c0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755220} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766868135s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, 
instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755340} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a7553a0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766879746s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a7554c8} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a755540} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a7555a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766893946s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.43.160:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a755740} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a755670} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60 Value:0xc02a7556d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766908177s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevlivesearch-app-slave-5f9d7fd6bc-sxjt4, uid=b332559c-562b-4c45-94cd-27d40a864a60} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a7558f0} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a755818} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a Value:0xc02a755880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766922317s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ 
var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw, uid=034086a0-1104-4270-bac7-4588dfa3648a} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755a80} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a7559c0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058 Value:0xc02a755a20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766935097s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-7j6vq, uid=ae51b866-27f7-4181-afc6-1afaf8d56058} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755b50} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755bc0} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, 
pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561 Value:0xc02a755c38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766947418s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdspreprodlivesearch-app-slave-6f5f8dfd8b-wcfsp, uid=f3ec083a-27fb-463b-a60b-2e1842373561} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755d00} B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755d60} C:{Var:C Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6 Value:0xc02a755dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.766960678s EvaluationString:[ var='A' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='B' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ], [ var='C' labels={cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777, uid=7e2d1fa7-4e1c-41aa-a4fa-d21bd117b4e6} value=0 ]} {Instance:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-metrics, namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5, uid=ae9f0c0b-7cd7-4591-81f4-3e4ba7b1edbf Value:0xc02a755e80} 
B:{Var:B Labels:cluster=tds-np-cluster, container=search-app-slave, instance=172.30.58.138:8080, job=integrations/kubernetes/kube-state-me +level=debug ts=2024-05-29T13:44:15.788087186Z caller=remote_instance_store.go:51 user=151289 slug=everflow msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.786018318Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError +logger=ngalert.state.manager user=615392 slug=shinemetrics instance="__name__=probe_success, config_version=1715008305715867392, instance=https://api.shine.fr/v2/referrals/liveness_check, job=Liveness Check referrals-v2, probe=Amsterdam" t=2024-05-29T13:44:15.785579974Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-replica-service-7b4df8ff7f-k4mzn" t=2024-05-29T13:44:15.785573489Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-replica-service-7b4df8ff7f-h8r69" t=2024-05-29T13:44:15.785527862Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-replica-service-7b4df8ff7f-9gbcz" t=2024-05-29T13:44:15.785475171Z level=debug msg="Keeping state" state=Normal +level=error ts=2024-05-29T13:44:15.785177173Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-import-service-5c95f8f985-fdzbq" t=2024-05-29T13:44:15.785285397Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-forecast-service-75f5ddb88d-vprqs" t=2024-05-29T13:44:15.785102836Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-forecast-service-75f5ddb88d-nwfg2" t=2024-05-29T13:44:15.784965233Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-export-service-66dbcf8f5b-jrc4g" t=2024-05-29T13:44:15.784820075Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.784694596Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.784612816Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.784474628Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=wf-budget-service-5d9b6c54f8-wjf9k" t=2024-05-29T13:44:15.784299856Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=396586 slug=opengov 
instance="cluster=production, environment=production, namespace=workforce, pod=wf-budget-service-5d9b6c54f8-wjf9k" t=2024-05-29T13:44:15.784287937Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.784172425Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=production, environment=production, namespace=workforce, pod=budget-gateway-service-5bf9899ddb-4hj4v" t=2024-05-29T13:44:15.783938316Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=396586 slug=opengov t=2024-05-29T13:44:15.783836757Z level=debug msg="State manager processing evaluation results" resultCount=24 +level=debug ts=2024-05-29T13:44:15.783603379Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.783596503Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.78346786Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.783454581Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.78344714Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.783417446Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.783407599Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.scheduler user=163513 slug=dialpad version=35 fingerprint=c5a97915aa68b6b4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.783300388Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.782775149s EvaluationString:}]" duration=52.906714ms +level=debug ts=2024-05-29T13:44:15.782165884Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.78157326Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.68816ms +logger=ngalert.state.manager user=765158 slug=stellarmenus instance="__name__=up, instance=grafana-prod, job=Step Functions" t=2024-05-29T13:44:15.781423863Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.780992116Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=312340 slug=lakefs version=100 fingerprint=78b1f02b6c94c6b4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.78084665Z level=debug msg="Alert rule evaluated" results="[{Instance:TableName=control-plane-v2 State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:TableName=control-plane-v2 Value:0xc017202a68} C:{Var:C Labels:TableName=control-plane-v2 Value:0xc017202a60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.780331055s EvaluationString:[ var='B' labels={TableName=control-plane-v2} value=0 ], [ var='C' labels={TableName=control-plane-v2} value=0 ]}]" duration=44.56025ms +logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.780918271Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.50355ms +level=debug ts=2024-05-29T13:44:15.780526613Z caller=remote_instance_store.go:51 user=756904 slug=orbdatanfr msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.779528076Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.778315273Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.777510495Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=237629 slug=ocrolus t=2024-05-29T13:44:15.777076516Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.775967332Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.075377ms +logger=ngalert.state.manager.persist user=656158 slug=muonspacegroundprod t=2024-05-29T13:44:15.775731596Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=ssd_used" previous_handler=resultNoData t=2024-05-29T13:44:15.775712504Z level=debug msg="Execution keep last state is Normal" handler=resultNormal +logger=ngalert.state.manager user=806229 slug=simplisafe instance="host=ip-10-91-5-100.us-west-2.compute.internal" t=2024-05-29T13:44:15.77284151Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=14927 slug=rstsoftware instance= t=2024-05-29T13:44:15.771474347Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770768917Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770729015Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770622945Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770613173Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770598955Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770579081Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770571348Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770550889Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770387812Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770365274Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770292505Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770271125Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770231852Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770221589Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.770163201Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=206107 slug=hydrolix version=3 fingerprint=4ecfee11a8a54653 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.7700428Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdeyrm9s020owb, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.769733229s EvaluationString:}]" duration=123.571101ms +level=info ts=2024-05-29T13:44:15.769608655Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhsq1zf0gsle alerts=1 +logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.769599016Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.924338ms +logger=ngalert.scheduler user=404375 slug=cbeanalytics version=2 fingerprint=ccf0a14cbed23fee attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.76775728Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.767411307s EvaluationString:}]" duration=16.800948ms +level=info ts=2024-05-29T13:44:15.767623342Z caller=grafana.go:247 user=396586 slug=opengov msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=40 alerts=0 +logger=ngalert.scheduler user=491157 slug=prd01wr version=2 fingerprint=165f2fee356ad8f8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.767433563Z 
level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.76711132s EvaluationString:}]" duration=21.075989ms +level=debug ts=2024-05-29T13:44:15.767074048Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.766874668Z level=debug msg="Keeping state" state=Normal +level=error ts=2024-05-29T13:44:15.766358493Z caller=remote_rule_evaluator.go:110 user=432323 slug=lithic msg="remote evaluate failed" code=Code(422) err="failed to parse expression 'B': reduction avg not implemented" +level=debug ts=2024-05-29T13:44:15.765364182Z caller=remote_instance_store.go:51 user=381989 slug=vanoordacf msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=328755 slug=infogrideu instance="ServiceName=sensor-planning-api" t=2024-05-29T13:44:15.763868946Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.764331698Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.764288258Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=453308 slug=hyperzodprod instance= t=2024-05-29T13:44:15.763944033Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=453308 slug=hyperzodprod instance= t=2024-05-29T13:44:15.763924718Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.763473101Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.762876862Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.762989195Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.762895982Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.762846442Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.762835984Z level=debug msg="Setting next state" handler=resultError +level=debug ts=2024-05-29T13:44:15.76276045Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.761866075Z caller=remote_instance_store.go:51 user=882448 slug=bookbookspace1 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.761779898Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=446790 slug=empowereco instance="instance=stargaze" t=2024-05-29T13:44:15.761521942Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=446790 slug=empowereco instance="instance=jackal" t=2024-05-29T13:44:15.761415347Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=446790 slug=empowereco instance="instance=jackal" t=2024-05-29T13:44:15.761399917Z level=debug msg="Setting next 
state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.761178995Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.759196238Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.759118674Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.758652888Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.758263356Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +level=debug ts=2024-05-29T13:44:15.757924854Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.757443058Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdg5sm3oacbnkc, ref_id=A" t=2024-05-29T13:44:15.756384898Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.scheduler user=206107 slug=hydrolix version=3 fingerprint=f72cb230217f9c02 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.756213335Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdg5sm3oacbnkc, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.755874102s EvaluationString:}]" duration=54.049352ms +logger=ngalert.state.manager.persist user=328755 slug=infogrideu t=2024-05-29T13:44:15.755491937Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.755393099Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.753795376Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.753194967Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" +level=info ts=2024-05-29T13:44:15.753056343Z caller=grafana.go:247 user=884866 slug=cnonumerique msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=pending&state=error" groups=10 alerts=0 +logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.751862342Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=698963 slug=lemonade instance="app=home-risk, pod=home-risk-668d54b448-jfx7r" t=2024-05-29T13:44:15.75185027Z level=debug msg="Keeping state" state=Normal +Error parsing panelUID for alert annotationruleID433dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=698963 slug=lemonade version=5 fingerprint=b5b925e753db6a58 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.751653578Z level=debug msg="Alert rule evaluated" results="[{Instance:app=home-risk, pod=home-risk-668d54b448-f4hll State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=home-risk, pod=home-risk-668d54b448-f4hll Value:0xc036c6dfc0} 
THRESHOLD:{Var:THRESHOLD Labels:app=home-risk, pod=home-risk-668d54b448-f4hll Value:0xc036c6df80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.751349421s EvaluationString:[ var='QUERY' labels={app=home-risk, pod=home-risk-668d54b448-f4hll} value=0 ], [ var='THRESHOLD' labels={app=home-risk, pod=home-risk-668d54b448-f4hll} value=0 ]} {Instance:app=home-risk, pod=home-risk-668d54b448-jfx7r State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=home-risk, pod=home-risk-668d54b448-jfx7r Value:0xc010006010} THRESHOLD:{Var:THRESHOLD Labels:app=home-risk, pod=home-risk-668d54b448-jfx7r Value:0xc010006070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.751363919s EvaluationString:[ var='QUERY' labels={app=home-risk, pod=home-risk-668d54b448-jfx7r} value=0 ], [ var='THRESHOLD' labels={app=home-risk, pod=home-risk-668d54b448-jfx7r} value=0 ]}]" duration=51.034272ms +level=info ts=2024-05-29T13:44:15.75166588Z caller=remote_alert_sender.go:94 user=191376 slug=abalabuha host=abalabuha-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.120.77:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=y-G__2Onk alerts=1 +logger=ngalert.state.manager.persist user=191376 slug=abalabuha t=2024-05-29T13:44:15.7515138Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.418151ms +logger=ngalert.state.manager user=516847 slug=signit instance= t=2024-05-29T13:44:15.751108277Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=516847 slug=signit version=28 fingerprint=5e9b2f15ba72108e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.750990512Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc04cb68eb8} B:{Var:B Labels: Value:0xc04cb68f30} C:{Var:C Labels: Value:0xc04cb68f38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.75063771s EvaluationString:[ var='A' labels={} value=25.314814814814525 ], [ var='B' labels={} value=25.314814814814525 ], [ var='C' labels={} value=0 ]}]" duration=21.376286ms +logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.750883505Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.75064387Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.750630523Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.750623536Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=214309 slug=spenmo version=277 fingerprint=fca3cec3a4df409e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.750488744Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.750146121s EvaluationString:}]" duration=39.999951ms +logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=5d2c306009ecd05a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.750471677Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: 
Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.75027071s EvaluationString:}]" duration=399.007287ms +logger=ngalert.state.manager user=465668 slug=xpressinfra instance= t=2024-05-29T13:44:15.750541675Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.749221346Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:15.748670215Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.801796ms +level=debug ts=2024-05-29T13:44:15.746308437Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.746013121Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=reserve-resource" t=2024-05-29T13:44:15.745871096Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=komodor" t=2024-05-29T13:44:15.745702534Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=argocd" t=2024-05-29T13:44:15.745590653Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.745326664Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=107.085363ms +level=debug ts=2024-05-29T13:44:15.745015631Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.744748336Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=882448 slug=bookbookspace1 instance="datasource_uid=grafanacloud-logs, ref_id=Number of Exception Logs" t=2024-05-29T13:44:15.743723748Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=882448 slug=bookbookspace1 instance="datasource_uid=grafanacloud-logs, ref_id=Number of Exception Logs" t=2024-05-29T13:44:15.743651906Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=882448 slug=bookbookspace1 instance="datasource_uid=grafanacloud-logs, ref_id=Number of Exception Logs" t=2024-05-29T13:44:15.743632896Z level=debug msg="Setting next state" handler=resultNoData +level=info ts=2024-05-29T13:44:15.743522425Z caller=remote_alert_sender.go:94 user=622339 slug=lendbr host=lendbr-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.117.83:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddk7e8hti9pmoc alerts=1 +logger=ngalert.state.manager.persist user=622339 slug=lendbr t=2024-05-29T13:44:15.742695906Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=48.313161ms +level=debug ts=2024-05-29T13:44:15.742687997Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=421567 slug=nexx360 t=2024-05-29T13:44:15.742332798Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=A,B" t=2024-05-29T13:44:15.742054174Z level=debug msg="Keeping state" state=NoData 
previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=dde0eed59739c93d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.741957263Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.741674269s EvaluationString:}]" duration=37.835978ms +logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:15.741237667Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=707607 slug=obi t=2024-05-29T13:44:15.738358799Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=301090 slug=racktopsystems instance="customer_number=CN00014B, is_vm=false, scope=public, stability=Release, system_serial=RT0001II, version=23.6.0.195" t=2024-05-29T13:44:15.740217929Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.738111564Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.739998314Z caller=remote_instance_store.go:51 user=76255 slug=benzinga msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=76255 slug=benzinga t=2024-05-29T13:44:15.739955852Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=301090 slug=racktopsystems instance="customer_number=CN00011S, is_vm=false, scope=public, stability=release, system_serial=RT0001CI, version=23.6.1.263" t=2024-05-29T13:44:15.739502448Z level=debug msg="Setting next state" handler=resultNormal +level=info ts=2024-05-29T13:44:15.739403982Z caller=remote_alert_sender.go:94 user=78401 slug=ayadav6 host=ayadav6-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.83.20:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=EVMYgIw7k alerts=1 +logger=ngalert.state.manager user=475799 slug=dpdcz instance="stream_name=DX_CUSTOMERS" t=2024-05-29T13:44:15.738873417Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=301090 slug=racktopsystems instance="customer_number=CN0000XZ, is_vm=false, scope=public, stability=release, system_serial=RT0001AK, version=23.2.0.54" t=2024-05-29T13:44:15.738745928Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:15.737868329Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=265756 slug=vowfood t=2024-05-29T13:44:15.737426394Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.073081ms +level=debug ts=2024-05-29T13:44:15.736607645Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=301090 slug=racktopsystems instance="customer_number=CN000001, is_vm=false, scope=public, stability=release, system_serial=RT0000XS, version=23.4.6.50" t=2024-05-29T13:44:15.7358482Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.732159013Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.732103713Z 
caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=191376 slug=abalabuha t=2024-05-29T13:44:15.732089299Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=info ts=2024-05-29T13:44:15.732032482Z caller=remote_image_capturer.go:61 user=191376 slug=abalabuha rule_org_id=1 rule_uid=y-G__2Onk dashboard=PuXkhsuMk panel=46 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" +level=debug ts=2024-05-29T13:44:15.731345009Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=824501 slug=bendingspoons t=2024-05-29T13:44:15.73125327Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=191376 slug=abalabuha t=2024-05-29T13:44:15.73120469Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.731095634Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:15.73068826Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=75.358914ms +level=debug ts=2024-05-29T13:44:15.729790503Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.726530699Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.726228577Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.726114377Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=66174db2c4744fdc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.725557472Z level=debug msg="Alert rule evaluated" results="[{Instance:name=keepLastValue(eadp.gos.torch.prod.bf-2021-xbsx-gen5.Users_in_Game,5) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0b894f2c8} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc0b894f2f0} Threshold:{Var:Threshold Labels: Value:0xc0b894f2f8} compare:{Var:compare Labels:name=keepLastValue(eadp.gos.torch.prod.bf-2021-xbsx-gen5.Users_in_Game,5) Query Value:0xc0b894f338} sum:{Var:sum Labels:name=keepLastValue(eadp.gos.torch.prod.bf-2021-xbsx-gen5.Users_in_Game,5) Query Value:0xc0b894f2b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.723851807s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=2000 ], [ var='Threshold' labels={} value=-6 ], [ var='compare' labels={name=keepLastValue(eadp.gos.torch.prod.bf-2021-xbsx-gen5.Users_in_Game,5) Query} value=0 ], [ var='sum' labels={name=keepLastValue(eadp.gos.torch.prod.bf-2021-xbsx-gen5.Users_in_Game,5) Query} value=0 ]}]" duration=52.989132ms +level=debug ts=2024-05-29T13:44:15.724947703Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=472647 slug=planet instance="service=urlsigning@file" t=2024-05-29T13:44:15.724890383Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.724634441Z level=debug msg="State 
manager processing evaluation results" resultCount=6 +level=debug ts=2024-05-29T13:44:15.723391804Z caller=remote_instance_store.go:51 user=265756 slug=vowfood msg="calling SaveAlertInstance" +level=info ts=2024-05-29T13:44:15.722156468Z caller=remote_alert_sender.go:94 user=681509 slug=momotfilip host=momotfilip-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.49.130:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c4e36770-a2b2-4f1d-b6fa-06d42192d97f alerts=1 +level=debug ts=2024-05-29T13:44:15.721337791Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.721145336Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-comcast-01" t=2024-05-29T13:44:15.720985738Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-sin-gsl-03" t=2024-05-29T13:44:15.719138772Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=386776 slug=rcsworks t=2024-05-29T13:44:15.718788495Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.316604ms +level=debug ts=2024-05-29T13:44:15.718505831Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-sin-gsl-01" t=2024-05-29T13:44:15.718531261Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.717782339Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-pl-wmi-dp-01" t=2024-05-29T13:44:15.716988805Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=78401 slug=ayadav6 instance= t=2024-05-29T13:44:15.716638922Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError +logger=ngalert.state.manager user=78401 slug=ayadav6 instance= t=2024-05-29T13:44:15.716627833Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.scheduler user=78401 slug=ayadav6 version=1 fingerprint=cfcc861d56cfafaa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.716566198Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': can not get data source by uid, uid is empty" duration=638.761µs +level=error ts=2024-05-29T13:44:15.716534985Z caller=remote_rule_evaluator.go:110 user=78401 slug=ayadav6 msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': can not get data source by uid, uid is empty" +logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.715890778Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.425512ms +level=debug ts=2024-05-29T13:44:15.715719772Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-no-osl-glesys-02" t=2024-05-29T13:44:15.71526246Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.714611148Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist 
user=173730 slug=nikon t=2024-05-29T13:44:15.714090022Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.327805ms +level=debug ts=2024-05-29T13:44:15.712676426Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-hk-hkg-dp-01" t=2024-05-29T13:44:15.711742041Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.71154909Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.711403648Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=797387 slug=roadrunnerdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.711270568Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:15.710363139Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.209065ms +logger=ngalert.state.manager user=398018 slug=joepegs instance= t=2024-05-29T13:44:15.709186657Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:15.708952993Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:15.708934947Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.708568187Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=620731 slug=masonite instance="resourceName=SLVAZQAINFMDMDQ" t=2024-05-29T13:44:15.708484406Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-fr-cdg-dp-02" t=2024-05-29T13:44:15.707892586Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.707825743Z caller=remote_instance_store.go:51 user=318387 slug=luarx msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.707785581Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-es-mad-dp-01" t=2024-05-29T13:44:15.707278492Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.707195829Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.966293ms +logger=ngalert.state.manager user=681509 slug=momotfilip instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.706997882Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=681509 slug=momotfilip instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.706982413Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=681509 slug=momotfilip t=2024-05-29T13:44:15.706951481Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.705810331Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix 
msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-ch-zrh-dp-01" t=2024-05-29T13:44:15.706010779Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-ca-yyz-dp-03" t=2024-05-29T13:44:15.705911747Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:15.70543289Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-br-sao-vultr-01" t=2024-05-29T13:44:15.705448335Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.7052521Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.705124811Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.704662008Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.704406949Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.703834271Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-at-vie-dp-03" t=2024-05-29T13:44:15.703575127Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.703551954Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-at-vie-dp-01" t=2024-05-29T13:44:15.703241202Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=389502 slug=ciscoiot t=2024-05-29T13:44:15.702674797Z level=debug msg="Skip rule evaluation because it is paused" +logger=ngalert.state.manager user=386776 slug=rcsworks instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.702432784Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=386776 slug=rcsworks instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.702408195Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-vultr-mex-ar-a01" t=2024-05-29T13:44:15.702396232Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=386776 slug=rcsworks version=2 fingerprint=e8763a41bede9687 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.702314783Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.702000443s EvaluationString:}]" duration=34.703757ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-linode-sin-id-a02" t=2024-05-29T13:44:15.701852935Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.701481986Z level=debug msg="Saving alert states" count=1 
max_state_save_concurrency=1 +logger=ngalert.state.manager user=134486 slug=podigee instance="cmd=exec, hostname=railspodigeecache-green-nbg1-02, instance=5.75.188.152:9121, job=consul_services, quantile=99, service=redis_exporter" t=2024-05-29T13:44:15.700990278Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-linode-ewr-us-a03" t=2024-05-29T13:44:15.700940238Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.700816961Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=753403 slug=romich t=2024-05-29T13:44:15.700467507Z level=debug msg="Saving alert states done" count=28 max_state_save_concurrency=1 duration=648.573495ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv6-linode-ewr-us-z01" t=2024-05-29T13:44:15.70050439Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv6-linode-bom-in-a03" t=2024-05-29T13:44:15.7003573Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.70020854Z caller=remote_instance_store.go:51 user=756904 slug=orbdatanfr msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.700224149Z caller=remote_instance_store.go:51 user=716600 slug=microntechnology msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.699322841Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-vultr-cdg-fr-a06" t=2024-05-29T13:44:15.69914996Z level=debug msg="Keeping state" state=Normal +level=debug component=discovery ts=2024-05-29T13:44:15.698798967Z caller=retry.go:58 user=529753 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=4 +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-m247-bru-be-a02" t=2024-05-29T13:44:15.696917347Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=851297 slug=roadrunneruat t=2024-05-29T13:44:15.696609227Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.696526756Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.696542311Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.696469875Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.696459244Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.696139788Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.279667ms +logger=ngalert.state.manager user=150145 slug=pleasant instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.696129816Z level=debug 
msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:15.696101307Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-bt-fra-de-a02" t=2024-05-29T13:44:15.696083272Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.696025666Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-aruba-flr-it-b02" t=2024-05-29T13:44:15.695798386Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.695060806Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:15.694757251Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.694678058Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:15.694664217Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=622339 slug=lendbr instance="__name__=kube_horizontalpodautoscaler_status_current_replicas, cluster=prod-shared-palmdale, horizontalpodautoscaler=keda-hpa-worker-voldemort-process-cerc-optin-request, instance=grafana-cloud-monitoring-kube-state-metrics.grafana-cloud-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=voldemort" t=2024-05-29T13:44:15.693794553Z level=debug msg="Setting next state" handler=resultAlerting +level=debug ts=2024-05-29T13:44:15.693430247Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=402122 slug=leapwallet t=2024-05-29T13:44:15.69275558Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.231683ms +logger=ngalert.state.manager user=697570 slug=carroteco t=2024-05-29T13:44:15.691502467Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=538355 slug=flogic t=2024-05-29T13:44:15.691176252Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.690567143Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.690415512Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=851297 slug=roadrunneruat version=1 fingerprint=9c1a80daca99034c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.6902981Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.690044953s EvaluationString:}]" duration=7.138562ms +logger=ngalert.state.manager.persist user=716600 slug=microntechnology t=2024-05-29T13:44:15.690140727Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:15.690105787Z 
level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.scheduler user=716600 slug=microntechnology version=1 fingerprint=f8ddac07d74aede4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.689947704Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.68981761s EvaluationString:}]" duration=8.845291ms +logger=ngalert.state.manager user=756004 slug=jdsportsprd instance="agent_hostname=ip-10-0-101-115, instance=ip-10-0-101-115:9090, job=integrations/node_exporter" t=2024-05-29T13:44:15.689652309Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.689378433Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.689361088Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.689149625Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=735588 slug=srepradnya t=2024-05-29T13:44:15.688925376Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.689000395Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.688979042Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=735588 slug=srepradnya version=5 fingerprint=d120aa2c0631ffcf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.688849265Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.688612809s EvaluationString:}]" duration=7.679271ms +level=debug ts=2024-05-29T13:44:15.688876216Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.687984649Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.687856961Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=715708 slug=ggiprod t=2024-05-29T13:44:15.687779927Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=6.666104ms +logger=ngalert.state.manager.persist user=615073 slug=origence t=2024-05-29T13:44:15.687549987Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=615073 slug=origence t=2024-05-29T13:44:15.687473282Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.687231608Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.687259287Z caller=remote_instance_store.go:51 user=756904 slug=orbdatanfr msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.687212134Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +level=debug 
ts=2024-05-29T13:44:15.687064744Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.686708769Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.685900873Z caller=remote_instance_store.go:51 user=295631 slug=dapvizor msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=295631 slug=dapvizor t=2024-05-29T13:44:15.685836397Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=ioFV1Jn4z, ref_id=A" t=2024-05-29T13:44:15.685798676Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:15.685466716Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=196413 slug=form3production instance="Region=-, ServiceLimit=Route 53 Max Health Checks, ServiceName=Route53" t=2024-05-29T13:44:15.684634669Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.684570409Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.683241135Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:15.68282555Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.250146ms +logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.68200537Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:15.681443556Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.681390478Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=111653 slug=theassociationmxp t=2024-05-29T13:44:15.681400602Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=111653 slug=theassociationmxp version=1 fingerprint=201c78cc669a9dad attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.681321764Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.680873163s EvaluationString:}]" duration=44.157366ms +logger=ngalert.state.manager user=715708 slug=ggiprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.68096289Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.scheduler user=715708 slug=ggiprod version=1 fingerprint=80fed88493f41399 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.680868089Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.680651243s EvaluationString:}]" duration=6.764154ms +logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.67968682Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.679617897Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling 
SaveAlertInstance" +logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.679412762Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.418322ms +level=debug ts=2024-05-29T13:44:15.678348561Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.678163434Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.67825407Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.678225911Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=55a1ecdf8e408796 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.677660052Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.677403174s EvaluationString:}]" duration=156.396427ms +level=debug ts=2024-05-29T13:44:15.677444326Z caller=remote_instance_store.go:51 user=423441 slug=outgoinc msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.675315795Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.675271694Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=698963 slug=lemonade instance="app=munic-device-management, pod=munic-device-management-7b66f56644-k2ntg" t=2024-05-29T13:44:15.675250196Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=698963 slug=lemonade instance="app=munic-device-management, pod=munic-device-management-7b66f56644-jtsf9" t=2024-05-29T13:44:15.675182621Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.675119188Z level=debug msg="State manager processing evaluation results" resultCount=2 +logger=ngalert.scheduler user=698963 slug=lemonade version=1 fingerprint=404505bccc452ab1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.675009104Z level=debug msg="Alert rule evaluated" results="[{Instance:app=munic-device-management, pod=munic-device-management-7b66f56644-jtsf9 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=munic-device-management, pod=munic-device-management-7b66f56644-jtsf9 Value:0xc036c6c2f8} THRESHOLD:{Var:THRESHOLD Labels:app=munic-device-management, pod=munic-device-management-7b66f56644-jtsf9 Value:0xc036c6c320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.674583749s EvaluationString:[ var='QUERY' labels={app=munic-device-management, pod=munic-device-management-7b66f56644-jtsf9} value=0 ], [ var='THRESHOLD' labels={app=munic-device-management, pod=munic-device-management-7b66f56644-jtsf9} value=0 ]} {Instance:app=munic-device-management, pod=munic-device-management-7b66f56644-k2ntg State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=munic-device-management, pod=munic-device-management-7b66f56644-k2ntg Value:0xc036c6c348} THRESHOLD:{Var:THRESHOLD Labels:app=munic-device-management, pod=munic-device-management-7b66f56644-k2ntg Value:0xc036c6c370}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.674599964s EvaluationString:[ var='QUERY' labels={app=munic-device-management, pod=munic-device-management-7b66f56644-k2ntg} value=0 ], [ var='THRESHOLD' labels={app=munic-device-management, pod=munic-device-management-7b66f56644-k2ntg} value=0 ]}]" duration=42.657383ms +logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:15.675013012Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.322388ms +level=debug ts=2024-05-29T13:44:15.674611693Z caller=remote_instance_store.go:51 user=756904 slug=orbdatanfr msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=158536 slug=clearsaleantifraude version=22 fingerprint=980185d2a9b2b90f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.674500456Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=alert_disater_recovery_connections_counter State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.674214799s EvaluationString:}]" duration=13.531697ms +logger=ngalert.scheduler user=402122 slug=leapwallet version=41 fingerprint=810cea1fe67883aa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.673406118Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.673060613s EvaluationString:}]" duration=22.498891ms +level=debug ts=2024-05-29T13:44:15.672963586Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PLAY-RB_QUEUE_STATE_MACHINE_LEDGER-SQS" t=2024-05-29T13:44:15.670814199Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.66903228Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.668981158Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=55491 slug=demandbase t=2024-05-29T13:44:15.668801851Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.265211ms +logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-8a51615fd7ec486a, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:15.668764373Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-8a51615fd7ec486a, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:15.668751741Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-2db0546e54a0406f, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:15.668654571Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.667553644Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.667299798Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:15.664570521Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=96267 
slug=dhlcamarafria2019 instance= t=2024-05-29T13:44:15.664122392Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=96267 slug=dhlcamarafria2019 t=2024-05-29T13:44:15.664080874Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.663771094Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.66300213Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.662970569Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.662183408Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.662145529Z caller=remote_instance_store.go:51 user=442934 slug=arqit msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.662121694Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=844274 slug=tixity instance="diskmountid=/" t=2024-05-29T13:44:15.66206385Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.662104618Z caller=remote_image_capturer.go:54 user=22398 slug=sunfolding rule_org_id=1 rule_uid=edae5869-8fa6-4fb1-8011-9257895c3628 dashboard=UsnySUPZz panel=69 msg="rendering alert image with grafana" +level=debug ts=2024-05-29T13:44:15.66206511Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:15.662015889Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.661809738Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.661759037Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.661676906Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:15.661065735Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=359640 slug=swfseu instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.660739473Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.660687581Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.660555984Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:15.660543077Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +level=debug ts=2024-05-29T13:44:15.659823559Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=890273 slug=cmhusqnp t=2024-05-29T13:44:15.659836767Z level=debug msg="Saving alert 
states done" count=1 max_state_save_concurrency=1 duration=12.536285ms +level=debug ts=2024-05-29T13:44:15.659510429Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.658739826Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.657758453Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.657916199Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:15.657835317Z level=warn msg="Failed to take an image" dashboard=wG02QrzZk panel=120 error="rpc error: code = Code(422) desc = screenshots unavailable" +level=debug ts=2024-05-29T13:44:15.657706367Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.657034278Z caller=remote_image_capturer.go:54 user=4947 slug=mediamath rule_org_id=1 rule_uid=fdbhspzwx1hj7e dashboard=wG02QrzZk panel=120 msg="rendering alert image with grafana" +logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=bc3710097ab94a2e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.656839828Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels:__name__=kube_node_spec_unschedulable, container=kube-state-metrics, endpoint=http, instance=10.233.75.27:8080, job=kube-state-metrics, namespace=prometheus, node=ord-mathco-prd023, pod=prometheus-kube-state-metrics-685b975bb7-n9tc8, service=prometheus-kube-state-metrics Value:0xc0351fe7f8} B1:{Var:B Labels:__name__=kube_node_spec_unschedulable, container=kube-state-metrics, endpoint=http, instance=10.233.75.27:8080, job=kube-state-metrics, namespace=prometheus, node=ord-mathco-prd035, pod=prometheus-kube-state-metrics-685b975bb7-n9tc8, service=prometheus-kube-state-metrics Value:0xc0351fe888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.656547528s EvaluationString:[ var='B0' metric='kube_node_spec_unschedulable' labels={__name__=kube_node_spec_unschedulable, container=kube-state-metrics, endpoint=http, instance=10.233.75.27:8080, job=kube-state-metrics, namespace=prometheus, node=ord-mathco-prd023, pod=prometheus-kube-state-metrics-685b975bb7-n9tc8, service=prometheus-kube-state-metrics} value=1501 ], [ var='B1' metric='kube_node_spec_unschedulable' labels={__name__=kube_node_spec_unschedulable, container=kube-state-metrics, endpoint=http, instance=10.233.75.27:8080, job=kube-state-metrics, namespace=prometheus, node=ord-mathco-prd035, pod=prometheus-kube-state-metrics-685b975bb7-n9tc8, service=prometheus-kube-state-metrics} value=1501 ]}]" duration=74.557611ms +level=debug ts=2024-05-29T13:44:15.656590808Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.656533959Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=206107 slug=hydrolix instance= t=2024-05-29T13:44:15.656504355Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.656072492Z level=debug msg="State manager processing evaluation results" resultCount=1 
+logger=ngalert.state.manager.persist user=253106 slug=elenasmonitor t=2024-05-29T13:44:15.655787303Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.234785ms
+level=debug ts=2024-05-29T13:44:15.655543358Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=spectrum-cable, uri=/rpc/com.asapp.schemas.product.chat.core.services.Core/PublishEvent" t=2024-05-29T13:44:15.655285113Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, uri=/rpc/com.asapp.schemas.product.chat.core.services.Core/PublishEvent" t=2024-05-29T13:44:15.655222173Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, uri=/rpc/com.asapp.schemas.product.chat.core.services.Core/PublishEvent" t=2024-05-29T13:44:15.655057172Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ingress-nginx-internal-controller, namespace=ingress-nginx-internal" t=2024-05-29T13:44:15.654958991Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-rkrp-dataplatform-adapter-worker, namespace=fairtiq-rkrp-dataplatform-adapter" t=2024-05-29T13:44:15.654591697Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.65444881Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-purchase-clearing-worker, namespace=fairtiq-purchase" t=2024-05-29T13:44:15.654329977Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-pricing-data-manager-web, namespace=fairtiq-pricing-data-manager" t=2024-05-29T13:44:15.65429408Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-payment-rkrp-web, namespace=fairtiq-payment-rkrp" t=2024-05-29T13:44:15.654207411Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-notification-gateway-pushnotification-worker, namespace=fairtiq-notification-gateway" t=2024-05-29T13:44:15.65387503Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-notification-gateway-pushnotification-worker, namespace=fairtiq-notification-gateway" t=2024-05-29T13:44:15.653859345Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.653739902Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.653782908Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-idle-tracker-notifier-notification-worker, namespace=fairtiq-idle-tracker-notifier" t=2024-05-29T13:44:15.653562046Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-hermes-worker, namespace=fairtiq-hermes" t=2024-05-29T13:44:15.653464684Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-hermes-worker, namespace=fairtiq-hermes" t=2024-05-29T13:44:15.653455884Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-go-worker, namespace=fairtiq-go" t=2024-05-29T13:44:15.653388396Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-fraudnonscalable-worker, namespace=fairtiq-fraud" t=2024-05-29T13:44:15.653117604Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-customer-care-worker, namespace=fairtiq-customer-care" t=2024-05-29T13:44:15.652960206Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-campaign-web, namespace=fairtiq-campaign" t=2024-05-29T13:44:15.652769084Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=20177 slug=paddledash instance="Component=currency-service, SLI=CurrencySettingsPatchAPILatency" t=2024-05-29T13:44:15.652770224Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=20177 slug=paddledash instance="Component=currency-service, SLI=CurrencySettingsPatchAPILatency" t=2024-05-29T13:44:15.652758381Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=20177 slug=paddledash t=2024-05-29T13:44:15.652719136Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=20177 slug=paddledash version=2 fingerprint=704656f72f8efcfe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.652633379Z level=debug msg="Alert rule evaluated" results="[{Instance:Component=currency-service, SLI=CurrencySettingsPatchAPILatency State:Normal Error: Results:map[] Values:map[AlertCondition:{Var:AlertCondition Labels:Component=currency-service, SLI=CurrencySettingsPatchAPILatency Value:0xc0415f13e0} BurnRate:{Var:BurnRate Labels:Component=currency-service, SLI=CurrencySettingsPatchAPILatency Value:0xc0415f1420} GoodEvents:{Var:GoodEvents Labels:Component=currency-service, SLI=CurrencySettingsPatchAPILatency Value:0xc0415f1340} ValidEvents:{Var:ValidEvents Labels:Component=currency-service, SLI=CurrencySettingsPatchAPILatency Value:0xc0415f1390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.652304888s EvaluationString:[ var='AlertCondition' labels={Component=currency-service, SLI=CurrencySettingsPatchAPILatency} value=0 ], [ var='BurnRate' labels={Component=currency-service, SLI=CurrencySettingsPatchAPILatency} value=NaN ], [ var='GoodEvents' labels={Component=currency-service, SLI=CurrencySettingsPatchAPILatency} value=0 ], [ var='ValidEvents' labels={Component=currency-service, SLI=CurrencySettingsPatchAPILatency} value=0 ]}]" duration=89.309657ms
+logger=ngalert.state.manager user=20177 slug=paddledash t=2024-05-29T13:44:15.652625724Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=fairtiq-activity-log-worker, namespace=fairtiq-activity-log" t=2024-05-29T13:44:15.652583934Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.652464516Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=external-secrets-webhook, namespace=external-secrets" t=2024-05-29T13:44:15.652486928Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=coredns, namespace=kube-system" t=2024-05-29T13:44:15.652331621Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-pricing-pricing-worker, namespace=ciaco-pricing" t=2024-05-29T13:44:15.652255161Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-pricing-data-installer-web, namespace=ciaco-pricing-data-installer" t=2024-05-29T13:44:15.652239073Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-novapt-web, namespace=ciaco-novapt" t=2024-05-29T13:44:15.652157208Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-novapt-web, namespace=ciaco-novapt" t=2024-05-29T13:44:15.652138177Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-ml-lcidetector-worker, namespace=ciaco-ml" t=2024-05-29T13:44:15.652073787Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-jm-worker, namespace=ciaco-jm" t=2024-05-29T13:44:15.65192881Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.651943376Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-backend-web, namespace=ciaco-backend" t=2024-05-29T13:44:15.651810606Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.651869652Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=ciaco-access-web, namespace=ciaco-access-web" t=2024-05-29T13:44:15.651731864Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.651846379Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.651841071Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.651698267Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=argocd-dex-server, namespace=argocd" t=2024-05-29T13:44:15.651516908Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=staging, deployment=argocd-applicationset-controller, namespace=argocd" t=2024-05-29T13:44:15.651455946Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=prometheus-blackbox-exporter, namespace=grafana-agent" t=2024-05-29T13:44:15.651349985Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=kube-state-metrics, namespace=grafana-agent" t=2024-05-29T13:44:15.651174186Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=kube-dns-autoscaler, namespace=kube-system" t=2024-05-29T13:44:15.651097648Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=external-secrets-webhook, namespace=external-secrets" t=2024-05-29T13:44:15.650838823Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=external-secrets, namespace=external-secrets" t=2024-05-29T13:44:15.650759574Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=coredns, namespace=kube-system" t=2024-05-29T13:44:15.650644037Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.650634388Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=argocd-commenter-controller-manager, namespace=argocd-commenter-system" t=2024-05-29T13:44:15.650350241Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=argo-workflows-server, namespace=argo-workflows" t=2024-05-29T13:44:15.650237024Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=amqp-sensor-5m5gq, namespace=fairtiq-hermes-ops" t=2024-05-29T13:44:15.650194694Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=sandbox, deployment=amqp-eventsource-2ggmx, namespace=fairtiq-hermes-ops" t=2024-05-29T13:44:15.650144987Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=surface, namespace=surface" t=2024-05-29T13:44:15.650061222Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=rabbitmq-exporter-prometheus-rabbitmq-exporter, namespace=rabbitmq-exporter" t=2024-05-29T13:44:15.650019593Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=733461 slug=lattice instance="instance=localhost:7400, job=sequencer-1, layer=l2, network=garnet, type=l2_safe" t=2024-05-29T13:44:15.649913802Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=733461 slug=lattice instance="instance=localhost:7400, job=follower-0, layer=l2, network=garnet, type=l2_safe" t=2024-05-29T13:44:15.64977443Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.648125754Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.647943657Z caller=remote_instance_store.go:51 user=691102 slug=deluxeconfdev msg="calling SaveAlertInstance"
+level=error ts=2024-05-29T13:44:15.647956796Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.647782414Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.647772164Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=691102 slug=deluxeconfdev t=2024-05-29T13:44:15.647758204Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=691102 slug=deluxeconfdev version=1 fingerprint=4bc6cea9ad43a1b0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.647691532Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.647523313s EvaluationString:}]" duration=10.797075ms
+level=debug ts=2024-05-29T13:44:15.646441392Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=866972 slug=mitsubishi t=2024-05-29T13:44:15.646359339Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.208702ms
+logger=ngalert.state.manager user=55491 slug=demandbase instance="datasource_uid=000000350, ref_id=B,C" t=2024-05-29T13:44:15.645441926Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=journey-tracer, namespace=journey-tracer" t=2024-05-29T13:44:15.64469215Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-sts-web, namespace=fairtiq-sts" t=2024-05-29T13:44:15.644387282Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-rkrp-loyalty-web, namespace=fairtiq-rkrp-loyalty" t=2024-05-29T13:44:15.6441099Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.644148037Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-realtime-tracker-metrics-connector-worker, namespace=fairtiq-realtime-tracker-metrics" t=2024-05-29T13:44:15.643959228Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.643783545Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-payment-worker, namespace=fairtiq-payment" t=2024-05-29T13:44:15.643683298Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=430961 slug=solifi version=4 fingerprint=d4e0cd5dd23ee714 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.643658456Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.643380509s EvaluationString:}]" duration=114.793013ms
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-payment-web, namespace=fairtiq-payment" t=2024-05-29T13:44:15.643654265Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.643627842Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-partner-worker, namespace=fairtiq-partner" t=2024-05-29T13:44:15.64354296Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-notification-worker, namespace=fairtiq-notification" t=2024-05-29T13:44:15.643477592Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-mobile-telemetry-web, namespace=fairtiq-mobile-telemetry" t=2024-05-29T13:44:15.643211523Z level=debug msg="Setting next state" handler=resultNormal
+level=info ts=2024-05-29T13:44:15.643139686Z caller=grafana.go:247 user=786662 slug=skycareaignoc msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=c78d146f-55b3-43c2-bde7-5e0d2c9e9f46" groups=0 alerts=0
+logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.643042979Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.643013669Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-jmlab-web, namespace=fairtiq-jmlab" t=2024-05-29T13:44:15.642712524Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=765874 slug=rhwstaging t=2024-05-29T13:44:15.642594706Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=15.043877ms
+level=debug ts=2024-05-29T13:44:15.642436634Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-ftd-manager-web, namespace=fairtiq-ftd-manager" t=2024-05-29T13:44:15.642271922Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-ftd-manager-web, namespace=fairtiq-ftd-manager" t=2024-05-29T13:44:15.6422442Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-data-exporter, namespace=fairtiq-data-exporter" t=2024-05-29T13:44:15.641994405Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-customer-care-worker, namespace=fairtiq-customer-care" t=2024-05-29T13:44:15.641792471Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.641833477Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=fairtiq-activity-log-web, namespace=fairtiq-activity-log" t=2024-05-29T13:44:15.641386083Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=external-secrets-cert-controller, namespace=external-secrets" t=2024-05-29T13:44:15.641253207Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=ciaco-pricing-clearing-worker, namespace=ciaco-pricing" t=2024-05-29T13:44:15.640726272Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=ciaco-ml-lcidetector-worker, namespace=ciaco-ml" t=2024-05-29T13:44:15.640604507Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=ciaco-jm-worker, namespace=ciaco-jm" t=2024-05-29T13:44:15.6404576Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=ciaco-beout-simulation, namespace=ciaco-beout-simulation" t=2024-05-29T13:44:15.64037573Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=ciaco-backend-pricing-worker, namespace=ciaco-backend" t=2024-05-29T13:44:15.640249845Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.640170228Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=ciaco-access-web, namespace=ciaco-access-web" t=2024-05-29T13:44:15.6401982Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=ciaco-access, namespace=ciaco-access" t=2024-05-29T13:44:15.640167303Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=528849 slug=bitvavo t=2024-05-29T13:44:15.640136293Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.640080282Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=production, deployment=aws-load-balancer-controller, namespace=alb-controller" t=2024-05-29T13:44:15.640116508Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.639937063Z caller=remote_instance_store.go:51 user=618621 slug=sendamatic msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=fairtiq-travel-web, namespace=fairtiq-travel" t=2024-05-29T13:44:15.639456521Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.63960187Z caller=remote_instance_store.go:51 user=87052 slug=polystream msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=fairtiq-hermes-worker, namespace=fairtiq-hermes" t=2024-05-29T13:44:15.63937294Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=fairtiq-go-worker, namespace=fairtiq-go" t=2024-05-29T13:44:15.639291419Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=618621 slug=sendamatic t=2024-05-29T13:44:15.639059776Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=external-secrets-cert-controller, namespace=external-secrets" t=2024-05-29T13:44:15.638975975Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:15.639041775Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:15.639023035Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.638902456Z caller=remote_image_capturer.go:54 user=87052 slug=polystream rule_org_id=1 rule_uid=G89Eyn_nk dashboard=Trb3KjvZz panel=43 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=87052 slug=polystream instance= t=2024-05-29T13:44:15.638839686Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=ciaco-pricing-pricing-worker, namespace=ciaco-pricing" t=2024-05-29T13:44:15.638771269Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=ciaco-jm-worker, namespace=ciaco-jm" t=2024-05-29T13:44:15.63865192Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=ciaco-jm-web, namespace=ciaco-jm" t=2024-05-29T13:44:15.638613278Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=ciaco-backend-pricing-worker, namespace=ciaco-backend" t=2024-05-29T13:44:15.63850284Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=ciaco-access, namespace=ciaco-access" t=2024-05-29T13:44:15.638405403Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=aws-load-balancer-controller, namespace=alb-controller" t=2024-05-29T13:44:15.638366497Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=argocd-server, namespace=argocd" t=2024-05-29T13:44:15.638323881Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=argocd-notifications-controller, namespace=argocd" t=2024-05-29T13:44:15.638229576Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=177465 slug=fairtiq instance="cluster=loadtest, deployment=argocd-dex-server, namespace=argocd" t=2024-05-29T13:44:15.638180301Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.638293191Z caller=remote_instance_store.go:51 user=841587 slug=tfxprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.638323401Z caller=remote_instance_store.go:51 user=781424 slug=n1eko msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.638297311Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.638032753Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+Error parsing panelUID for alert annotationruleID2665dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=698963 slug=lemonade version=4 fingerprint=7ddbf80cca564d67 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.638013293Z level=debug msg="Alert rule evaluated" results="[{Instance:app=arcs-api, pod=arcs-api-69cdf955-bx2x6 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=arcs-api, pod=arcs-api-69cdf955-bx2x6 Value:0xc02a9a8d58} THRESHOLD:{Var:THRESHOLD Labels:app=arcs-api, pod=arcs-api-69cdf955-bx2x6 Value:0xc02a9a8d90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.637655643s EvaluationString:[ var='QUERY' labels={app=arcs-api, pod=arcs-api-69cdf955-bx2x6} value=0 ], [ var='THRESHOLD' labels={app=arcs-api, pod=arcs-api-69cdf955-bx2x6} value=0 ]} {Instance:app=arcs-api, pod=arcs-api-69cdf955-q7z28 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=arcs-api, pod=arcs-api-69cdf955-q7z28 Value:0xc02a9a8dd0} THRESHOLD:{Var:THRESHOLD Labels:app=arcs-api, pod=arcs-api-69cdf955-q7z28 Value:0xc02a9a8e10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.637670298s EvaluationString:[ var='QUERY' labels={app=arcs-api, pod=arcs-api-69cdf955-q7z28} value=0 ], [ var='THRESHOLD' labels={app=arcs-api, pod=arcs-api-69cdf955-q7z28} value=0 ]}]" duration=74.661277ms
+level=debug ts=2024-05-29T13:44:15.637630696Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.636897447Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.636837038Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.636745144Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-gsl-02" t=2024-05-29T13:44:15.636389339Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-dca-latitude-02" t=2024-05-29T13:44:15.636009874Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-dca-latitude-02" t=2024-05-29T13:44:15.635999064Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=245291 slug=pismo version=652 fingerprint=0677a999737228a5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.635594297Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.635351369s EvaluationString:}]" duration=401.610858ms
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.635282039Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.63521168Z caller=remote_instance_store.go:51 user=866972 slug=mitsubishi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=866972 slug=mitsubishi t=2024-05-29T13:44:15.635148478Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=866972 slug=mitsubishi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.635133379Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=866972 slug=mitsubishi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.635108968Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=866972 slug=mitsubishi t=2024-05-29T13:44:15.635094737Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=25487 slug=cryptoview instance= t=2024-05-29T13:44:15.634850343Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=25487 slug=cryptoview t=2024-05-29T13:44:15.634562446Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.634012539Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.633555956Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.63302841Z caller=remote_instance_store.go:51 user=612213 slug=crl msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=612213 slug=crl t=2024-05-29T13:44:15.632960269Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=612213 slug=crl instance= t=2024-05-29T13:44:15.632946597Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.632406784Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.632346636Z caller=remote_instance_store.go:51 user=84360 slug=sib msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=608555 slug=ias t=2024-05-29T13:44:15.632319592Z level=info msg="Detected stale state entry" cacheID="[[\"Series\",\"query67221c8bc1874a12b7883a0c023ee3a2\"],[\"TargetGroup\",\"targetgroup/eng-ct-zh-zt-ml/7540ca45763622da\"],[\"__alert_rule_namespace_uid__\",\"a63484ae-feeb-4a65-a678-b4b6dd77dc42\"],[\"__alert_rule_uid__\",\"f681bf88-eab2-4dbf-b554-1e735e6f40eb\"],[\"alertname\",\"LTS High response time [NO]\"],[\"grafana_folder\",\"CRE\"],[\"team\",\"cre\"]]" state=Normal reason=
+logger=ngalert.state.manager user=608555 slug=ias instance="Series=query17f5ae661fb9489889ce2e64e413fb9c, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da" t=2024-05-29T13:44:15.632292596Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.631524411Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.631513845Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=530405 slug=zetetic instance="chain=Kusama, pool=Mermaid 1" t=2024-05-29T13:44:15.631461502Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=444728 slug=stgnextgen instance= t=2024-05-29T13:44:15.631329215Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=444728 slug=stgnextgen t=2024-05-29T13:44:15.631148964Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=444728 slug=stgnextgen version=2 fingerprint=e63cf2f1b06fb2fa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.631032945Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.63070082s EvaluationString:}]" duration=362.430284ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-nz-akl-gsl-02" t=2024-05-29T13:44:15.631078364Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.629771663Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-nl-ams-gsl-01" t=2024-05-29T13:44:15.629620652Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.629423115Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.62860727Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-mx-mex-vultr-01" t=2024-05-29T13:44:15.628091325Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.627945989Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.62760564Z caller=remote_instance_store.go:51 user=765874 slug=rhwstaging msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=765874 slug=rhwstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.627516338Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=765874 slug=rhwstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.627479167Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-jp-tyo-dp-02" t=2024-05-29T13:44:15.627460827Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=662363 slug=facephi t=2024-05-29T13:44:15.627355215Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.853488ms
+logger=ngalert.scheduler user=313711 slug=julienbeduneau version=1 fingerprint=3d54c5e55847ac82 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.625639363Z level=debug msg="Alert rule evaluated" results="[{Instance:onprem=ALTEREGO State:Normal Error: Results:map[] Values:map[NB_LOGS_30_MIN:{Var:NB_LOGS_30_MIN Labels:onprem=ALTEREGO Value:0xc02567c9d0} NB_LOGS_BELOW_1:{Var:NB_LOGS_BELOW_1 Labels:onprem=ALTEREGO Value:0xc02567ca18} NB_LOGS_LAST_30M:{Var:NB_LOGS_LAST_30M Labels:onprem=ALTEREGO Value:0xc02567c998}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.625266858s EvaluationString:[ var='NB_LOGS_30_MIN' labels={onprem=ALTEREGO} value=3 ], [ var='NB_LOGS_BELOW_1' labels={onprem=ALTEREGO} value=0 ], [ var='NB_LOGS_LAST_30M' labels={onprem=ALTEREGO} value=3 ]}]" duration=62.546563ms
+level=debug ts=2024-05-29T13:44:15.625104477Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.625072594Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.306162ms
+logger=ngalert.state.manager user=96036 slug=stivenc instance= t=2024-05-29T13:44:15.624756439Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-in-del-vultr-01" t=2024-05-29T13:44:15.624398597Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.624062997Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.624115101Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.624023784Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.623220596Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:15.623162885Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=491157 slug=prd01wr instance="DatabaseClass=db.r5.4xlarge" t=2024-05-29T13:44:15.623141584Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:15.621760847Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-il-tlv-dp-01" t=2024-05-29T13:44:15.621665351Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:15.621476878Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=27737 slug=edfmancapital instance= t=2024-05-29T13:44:15.621461971Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=696798 slug=mcv instance= t=2024-05-29T13:44:15.621350998Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.620924381Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-ukserv-03" t=2024-05-29T13:44:15.619759711Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.619316823Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.619019615Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.617752196Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.617445343Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.617322233Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.608576964Z caller=remote_instance_store.go:51 user=253106 slug=elenasmonitor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.615823715Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.615604327Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:15.615725794Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.614826986Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.614532847Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=139073 slug=cargo1 t=2024-05-29T13:44:15.614579311Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=139073 slug=cargo1 instance= t=2024-05-29T13:44:15.614549767Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=662363 slug=facephi t=2024-05-29T13:44:15.614446355Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-76246a41d2c84388, persistentvolumeclaim=main-main-795j-pgdata" t=2024-05-29T13:44:15.613946676Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:15.613767101Z level=debug msg="State manager processing evaluation results" resultCount=9
+level=debug ts=2024-05-29T13:44:15.613747417Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.613687818Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.612806607Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=61907 slug=fullstory t=2024-05-29T13:44:15.612469987Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=67.806033ms
+Error parsing panelUID for alert annotationruleID2429dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.61255013Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=75.005451ms
+level=debug ts=2024-05-29T13:44:15.612194846Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.612028804Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.611532159Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:15.611482042Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:15.611466515Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:15.61145251Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-fr-mrs-gsl-02" t=2024-05-29T13:44:15.611158064Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.611100952Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=campaign_margins_calc_query" t=2024-05-29T13:44:15.610803228Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=campaign_margins_calc_query" t=2024-05-29T13:44:15.610793713Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager.persist user=716631 slug=sugatsune t=2024-05-29T13:44:15.610789713Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=75f131f94e1a675d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.610695796Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=campaign_margins_calc_query State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.610433213s EvaluationString:}]" duration=1.647020763s
+logger=ngalert.state.manager user=716631 slug=sugatsune instance="__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enable_pimql, status=ok" t=2024-05-29T13:44:15.610744822Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.610799605Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.610680557Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=716631 slug=sugatsune t=2024-05-29T13:44:15.61063679Z level=debug msg="State manager processing evaluation results" resultCount=2
+level=debug ts=2024-05-29T13:44:15.610402016Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.610220799Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.608148938Z caller=remote_instance_store.go:51 user=148654 slug=tinybeans msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-fr-cdg-dp-02" t=2024-05-29T13:44:15.608381575Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.608351038Z caller=remote_instance_store.go:51 user=756904 slug=orbdatanfr msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.608208304Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:15.608044782Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:15.60803596Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+logger=ngalert.state.manager.persist user=716527 slug=newpigqa t=2024-05-29T13:44:15.607512757Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=716527 slug=newpigqa t=2024-05-29T13:44:15.607468076Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=716527 slug=newpigqa version=1 fingerprint=dec1ac71fe198463 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.607389965Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.60720621s EvaluationString:}]" duration=9.147846ms
+level=debug ts=2024-05-29T13:44:15.607373525Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.607270503Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.605050951Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.604488143Z caller=remote_instance_store.go:51 user=800848 slug=flowfoundation msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=800848 slug=flowfoundation instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.604381208Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.scheduler user=800848 slug=flowfoundation version=2 fingerprint=17c3b456abf7e11e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.604265322Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.603624143s EvaluationString:}]" duration=101.078902ms
+logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.603877039Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=139.737818ms
+level=debug ts=2024-05-29T13:44:15.60168123Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.601176756Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=615392 slug=shinemetrics instance="metric.label.memory=256, metric.label.status=error, metric.label.trigger_type=google.pubsub.topic.publish, resource.label.function_name=transactionsCreate, resource.label.project_id=shine-163816, resource.label.region=europe-west1, resource.type=cloud_function" t=2024-05-29T13:44:15.60101864Z level=debug msg="Setting next state" handler=resultAlerting
+logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:15.600860408Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=fe98eaba-ee1b-4198-8ef3-9181223fbc0d, ref_id=A" t=2024-05-29T13:44:15.600852209Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=fe98eaba-ee1b-4198-8ef3-9181223fbc0d, ref_id=A" t=2024-05-29T13:44:15.600837075Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.600758677Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=615392 slug=shinemetrics instance="metric.label.memory=2048, metric.label.status=error, metric.label.trigger_type=HTTP_TRIGGER, resource.label.function_name=createBankAccount, resource.label.project_id=shine-163816, resource.label.region=europe-west1, resource.type=cloud_function" t=2024-05-29T13:44:15.60047074Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.599292668Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.598191561Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager.persist user=714577 slug=readypactest t=2024-05-29T13:44:15.598128347Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=20.106554ms
+level=debug ts=2024-05-29T13:44:15.597739284Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=844490 slug=prea t=2024-05-29T13:44:15.596768429Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=20.923654ms
+logger=ngalert.state.manager.persist user=214309 slug=spenmo t=2024-05-29T13:44:15.59616226Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.727891ms
+level=debug ts=2024-05-29T13:44:15.595823373Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.595815327Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=27737 slug=edfmancapital version=1 fingerprint=284ac7f980de3869 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.595176894Z level=debug msg="Alert rule evaluated" results="[{Instance:QueueName=os-updates-queue State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:QueueName=os-updates-queue Value:0xc00c786cc0} C:{Var:C Labels:QueueName=os-updates-queue Value:0xc00c786cc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.594638329s EvaluationString:[ var='B' labels={QueueName=os-updates-queue} value=0 ], [ var='C' labels={QueueName=os-updates-queue} value=0 ]}]" duration=107.770697ms
+level=debug ts=2024-05-29T13:44:15.594284739Z caller=remote_instance_store.go:51 user=464973 slug=equansdatahub msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=464973 slug=equansdatahub t=2024-05-29T13:44:15.594192865Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.594088677Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=314067 slug=itsme t=2024-05-29T13:44:15.593322854Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.585595ms
+level=debug ts=2024-05-29T13:44:15.592470587Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.591518284Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.591393003Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.588912852Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.588718643Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.587820947Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.587783415Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.587183858Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.586875614Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.586328285Z caller=remote_instance_store.go:51 user=714577 slug=readypactest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=698119 slug=simonsprod t=2024-05-29T13:44:15.585952839Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=698119 slug=simonsprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.585928529Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.scheduler user=698119 slug=simonsprod version=1 fingerprint=b2f2e19f869bfe7c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.585817107Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.585645546s EvaluationString:}]" duration=6.573962ms
+level=debug ts=2024-05-29T13:44:15.585151916Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.584479883Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:15.584404032Z level=debug msg="Setting next state" handler=resultNoData
+level=info ts=2024-05-29T13:44:15.584418726Z caller=remote_alert_sender.go:94 user=54972 slug=zanglang host=zanglang-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.53.86:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=XlGHxvWVk alerts=1
+logger=ngalert.state.manager.persist user=54972 slug=zanglang t=2024-05-29T13:44:15.584269573Z level=debug msg="Saving alert states done" count=16 max_state_save_concurrency=1 duration=236.612066ms
+level=debug ts=2024-05-29T13:44:15.582961222Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.582764513Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.582754552Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.58272442Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.582512429Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:15.582515037Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.582284428Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.58183773Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.580627408Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.579509773Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.579422204Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=698047 slug=gamesworkshop t=2024-05-29T13:44:15.579093583Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=18.385714ms
+level=debug ts=2024-05-29T13:44:15.578470533Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.578355317Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.578149274Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=714577 slug=readypactest t=2024-05-29T13:44:15.578018194Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=714577 slug=readypactest instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.577967783Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=714577 slug=readypactest instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.577954412Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.577975026Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.577874945Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.577913723Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.577865125Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=314067 slug=itsme t=2024-05-29T13:44:15.576672856Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=info ts=2024-05-29T13:44:15.576559517Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edbhspyyxf85jc alerts=1
+logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=b9c613047193b706 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.576343003Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.575855469s EvaluationString:}]" duration=57.647083ms
+level=debug ts=2024-05-29T13:44:15.576077732Z caller=remote_instance_store.go:51 user=337951 slug=pawapay msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.575291792Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.575140597Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.575076873Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.575104014Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.574536976Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.574105474Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=691059 slug=deluxeconfstg t=2024-05-29T13:44:15.572903187Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=691059 slug=deluxeconfstg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.572883666Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.scheduler user=472647 slug=planet version=3 fingerprint=819450fa000f5e87 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.572799903Z level=debug msg="Alert rule evaluated" results="[{Instance:metric.name=value_num_undelivered_messages_max_max State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_max_max Value:0xc01fd93668} C:{Var:C Labels:metric.name=value_num_undelivered_messages_max_max Value:0xc01fd936a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.572422738s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_max_max} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_max_max} value=0 ]}]" duration=113.773568ms
+logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:15.570304699Z level=debug msg="Done saving alert state history batch"
+level=debug ts=2024-05-29T13:44:15.568499653Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.567649428Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.567454712Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.567184469Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=689030 slug=simonsuat t=2024-05-29T13:44:15.566824783Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.616753ms
+level=debug ts=2024-05-29T13:44:15.566753932Z caller=remote_instance_store.go:51 user=715709 slug=mtbprod msg="calling SaveAlertInstance"
+level=debug
ts=2024-05-29T13:44:15.566435461Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-dk-cph-glesys-02" t=2024-05-29T13:44:15.56625081Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=307001 slug=hirerightdev t=2024-05-29T13:44:15.564829713Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="device=/dev/sda1, fstype=vfat, instance=puuswe1cjillwcadbs1001.jill.gcp.hclsw.internal, job=Auth-DB-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.564290762Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=puuswe1bjillwcldbs1001.jill.gcp.hclsw.internal, job=Live-DB-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.564137909Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=puuswe1bjillwcldbs1001.jill.gcp.hclsw.internal, job=Live-DB-Host-VM, mountpoint=/db2inst" t=2024-05-29T13:44:15.564079818Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=puuswe1bjillwcldbs1001.jill.gcp.hclsw.internal, job=Live-DB-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.564066428Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=puuswe1ajillutlbst1001.jill.gcp.hclsw.internal, job=Bastion-VM-Host, mountpoint=/export" t=2024-05-29T13:44:15.563985896Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=puuswe1ajillutlbst1001.jill.gcp.hclsw.internal, job=Bastion-VM-Host, mountpoint=/data" t=2024-05-29T13:44:15.563972566Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:15.564082477Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-ch-zrh-dp-01" t=2024-05-29T13:44:15.563467248Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:15.563301317Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:15.563277927Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=806229 slug=simplisafe version=41 fingerprint=c821e5b8447d5ba5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.563151125Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0712e52d0} B:{Var:B Labels: Value:0xc0712e52d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.562228805s EvaluationString:[ var='A' labels={} value=0 ], [ var='B' labels={} value=0 ]}]" duration=48.771187ms +level=debug ts=2024-05-29T13:44:15.562684646Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" 
+logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:15.562607707Z level=debug msg="Alert state changed creating annotation" newState="Normal (MissingSeries)" oldState=Pending +logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:15.562581207Z level=debug msg="Alert state changed creating annotation" newState=Pending oldState=Normal +logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.562219104Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.761329ms +level=debug ts=2024-05-29T13:44:15.561869911Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:15.561811986Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:15.561799685Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=155740 slug=routific t=2024-05-29T13:44:15.561751434Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-at-vie-dp-03" t=2024-05-29T13:44:15.560905487Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=698047 slug=gamesworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.560694618Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=698047 slug=gamesworkshop version=1 fingerprint=c4fecc013f465f0a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.560578607Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.56042631s EvaluationString:}]" duration=6.321697ms +logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious" t=2024-05-29T13:44:15.560584413Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-at-vie-dp-01" t=2024-05-29T13:44:15.560625874Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=269887 slug=blackrockdev t=2024-05-29T13:44:15.56008357Z level=debug msg="Skip rule evaluation because it is paused" +level=debug ts=2024-05-29T13:44:15.559694524Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.559422601Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=350346 slug=restake t=2024-05-29T13:44:15.559114979Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.581134ms +level=debug ts=2024-05-29T13:44:15.559125148Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.558937688Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.558773689Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=245291 slug=pismo 
instance= t=2024-05-29T13:44:15.558026717Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager.persist user=923052 slug=magicairestricted t=2024-05-29T13:44:15.557701366Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=65.960028ms +logger=ngalert.state.manager.persist user=689030 slug=simonsuat t=2024-05-29T13:44:15.557204299Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=689030 slug=simonsuat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.557188779Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=689030 slug=simonsuat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.557165879Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=715709 slug=mtbprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.556978425Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=715709 slug=mtbprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.556966214Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=715709 slug=mtbprod t=2024-05-29T13:44:15.556941774Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager.persist user=716630 slug=coapdev t=2024-05-29T13:44:15.556562148Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.261674ms +level=debug ts=2024-05-29T13:44:15.556276088Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.55577084Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.555750678Z caller=remote_instance_store.go:51 user=170883 slug=datacontrol msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=146728 slug=dgc t=2024-05-29T13:44:15.555761407Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=116.259935ms +level=debug ts=2024-05-29T13:44:15.554952315Z caller=remote_instance_store.go:51 user=527202 slug=lnrsusinsurancedev msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=901230 slug=integromonitor t=2024-05-29T13:44:15.554523154Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.407907ms +level=debug ts=2024-05-29T13:44:15.554514546Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.553218015Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.553154537Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.552874755Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" +level=info ts=2024-05-29T13:44:15.552821217Z caller=remote_alert_sender.go:94 user=186562 slug=defier host=defier-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.62.20:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=iKyxEnUnk alerts=1 +level=debug 
ts=2024-05-29T13:44:15.552404077Z caller=remote_instance_store.go:51 user=713314 slug=tpceunonprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=713314 slug=tpceunonprod t=2024-05-29T13:44:15.552338515Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=713314 slug=tpceunonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.552309956Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.scheduler user=713314 slug=tpceunonprod version=1 fingerprint=a69f99a7576a4002 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.552227424Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.551971791s EvaluationString:}]" duration=7.450237ms +level=debug ts=2024-05-29T13:44:15.551998773Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.551612858Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.551604825Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.551599135Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.scheduler user=327842 slug=exabeam version=357 fingerprint=78d0fe53863ad6f5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.551515357Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.551271932s EvaluationString:}]" duration=30.490684ms +logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.550409227Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.550368297Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=707603 slug=canoneurope version=1 fingerprint=45b65a2d12d4383a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.550208377Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.549924429s EvaluationString:}]" duration=10.69707ms +level=debug ts=2024-05-29T13:44:15.550211598Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.550137005Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=191103 slug=amazonadmin version=194 fingerprint=a6e9d70e99d781a8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.549912853Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.549722846s EvaluationString:}]" duration=58.709103ms +logger=ngalert.state.manager user=114492 slug=railsbank instance="datasource_uid=KZy8Z1O7k, ref_id=DLQ" t=2024-05-29T13:44:15.549924906Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.549784952Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.549648133Z caller=remote_instance_store.go:51 user=743579 slug=neotax msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:15.549531781Z level=debug msg="Deleting alert states" count=1 +logger=ngalert.state.manager user=516446 slug=awarehqdev t=2024-05-29T13:44:15.549523281Z level=info msg="Detected stale state entry" cacheID="[[\"EndpointName\",\"screenshotdetect-deployment\"],[\"Series\",\"query206560f3514447f49eb8f0c066f71a2a\"],[\"__alert_rule_namespace_uid__\",\"D-8RyMx4z\"],[\"__alert_rule_uid__\",\"kLXBSfb4kz\"],[\"alertname\",\"screenshotdetect-deployment-no-invocations\"],[\"grafana_folder\",\"bi\"],[\"group\",\"SageMakerNoInvocations\"],[\"route\",\"team=bi\"],[\"team\",\"bi\"]]" state=Pending reason= +logger=ngalert.state.manager user=516446 slug=awarehqdev instance="EndpointName=screenshotdetect-deployment, Series=query754aaa9a67e94850a7b1d780d85f296e" t=2024-05-29T13:44:15.54950148Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z +level=debug ts=2024-05-29T13:44:15.549556881Z caller=remote_instance_store.go:57 user=516446 slug=awarehqdev msg="calling DeleteAlertInstances - not implemented" +level=debug ts=2024-05-29T13:44:15.549362927Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=716630 slug=coapdev t=2024-05-29T13:44:15.549253434Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.549280237Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.549252324Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.549066482Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.548026644Z caller=remote_instance_store.go:51 user=263582 slug=prestowillis msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=350346 slug=restake t=2024-05-29T13:44:15.54752881Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:15.547499213Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=35.443774ms +logger=ngalert.state.manager user=350346 slug=restake t=2024-05-29T13:44:15.54747184Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.547297428Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=191103 slug=amazonadmin version=63 fingerprint=b8a6d2fcc07b3cc0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.547182523Z level=debug msg="Alert rule evaluated" 
results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.546947577s EvaluationString:}]" duration=172.720412ms +level=debug ts=2024-05-29T13:44:15.546422624Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.546364761Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.546350373Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.546342184Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.546334533Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:15.545968606Z caller=remote_instance_store.go:51 user=841587 slug=tfxprod msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.545210647Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=309009 slug=elestyle version=1 fingerprint=9b388181970bf5fd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.545014176Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=zxr_3eR4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.544749642s EvaluationString:}]" duration=156.290885ms +logger=ngalert.state.manager user=77750 slug=screenmeet instance= t=2024-05-29T13:44:15.544849885Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.scheduler user=77750 slug=screenmeet version=4 fingerprint=78e2a544267ce0cb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.544766904Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.544404172s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=59.062786ms +logger=ngalert.state.manager.persist user=61907 slug=fullstory t=2024-05-29T13:44:15.544659537Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=61907 slug=fullstory t=2024-05-29T13:44:15.544566727Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.544553134Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.464456ms +logger=ngalert.scheduler user=61907 slug=fullstory version=1 fingerprint=c18f10eaa780028b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.54444143Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0197d1750} B:{Var:B Labels: Value:0xc0197d1758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.544018445s EvaluationString:[ var='A' labels={} value=71 ], [ var='B' labels={} value=0 ]}]" duration=111.768536ms +logger=ngalert.state.manager user=332555 slug=omexomcs t=2024-05-29T13:44:15.543040161Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug 
ts=2024-05-29T13:44:15.542039788Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=901230 slug=integromonitor instance= t=2024-05-29T13:44:15.541073248Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=148654 slug=tinybeans instance="instance=https://accounts.tinybeans.com/health/alive" t=2024-05-29T13:44:15.540724664Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.540754674Z caller=remote_rule_evaluator.go:193 user=396586 slug=opengov msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" +level=debug ts=2024-05-29T13:44:15.540598138Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=695885 slug=lululemonprod t=2024-05-29T13:44:15.539588919Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.884925ms +level=info ts=2024-05-29T13:44:15.539569701Z caller=grafana.go:247 user=523256 slug=enovaafrica msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=2 alerts=0 +level=debug ts=2024-05-29T13:44:15.5391315Z caller=remote_instance_store.go:51 user=781424 slug=n1eko msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.538975627Z caller=remote_instance_store.go:51 user=682219 slug=mcb1 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=682219 slug=mcb1 instance="beta_kubernetes_io_arch=amd64, beta_kubernetes_io_os=linux, device=/dev/sda1, fstype=ext4, instance=10.1.9.183:9100, job=node, kubernetes_io_arch=amd64, kubernetes_io_hostname=mcb-paas-node-9, kubernetes_io_os=linux, microk8s_io_cluster=true, mountpoint=/, node_kubernetes_io_microk8s_controlplane=microk8s-controlplane" t=2024-05-29T13:44:15.538812433Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=781424 slug=n1eko instance="__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp8" t=2024-05-29T13:44:15.538806454Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=781424 slug=n1eko instance="__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp8" t=2024-05-29T13:44:15.538792232Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=781424 slug=n1eko instance="__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp6" t=2024-05-29T13:44:15.538704551Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=682219 slug=mcb1 instance="beta_kubernetes_io_arch=amd64, beta_kubernetes_io_os=linux, device=/dev/sda1, fstype=ext4, instance=10.1.82.67:9100, job=node, kubernetes_io_arch=amd64, kubernetes_io_hostname=mcb-paas-node-8, kubernetes_io_os=linux, microk8s_io_cluster=true, mountpoint=/, node_kubernetes_io_microk8s_controlplane=microk8s-controlplane" t=2024-05-29T13:44:15.538583609Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=781424 slug=n1eko instance="__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp2" t=2024-05-29T13:44:15.53860448Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=781424 
slug=n1eko instance="__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp10" t=2024-05-29T13:44:15.538560668Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=781424 slug=n1eko instance="__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp1" t=2024-05-29T13:44:15.538506267Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=781424 slug=n1eko version=19 fingerprint=45eb457d264a4b2f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.538088609Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp1 Value:0xc067f80078} C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp1 Value:0xc067f800c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.527457579s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp1} value=48 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp1} value=0 ]} {Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp10 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp10 Value:0xc067f80160} C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp10 Value:0xc067f80198}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.527465609s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp10} value=0 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp10} value=0 ]} {Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp2 Value:0xc067f80228} C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp2 Value:0xc067f80270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.527468779s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp2} value=36.5 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp2} value=0 ]} {Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp4 Value:0xc067f802f0} 
C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp4 Value:0xc067f80328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.527473089s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp4} value=32 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp4} value=0 ]} {Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp6 Value:0xc067f80398} C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp6 Value:0xc067f803d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.52747595s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp6} value=31 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp6} value=0 ]} {Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp7 Value:0xc067f80460} C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp7 Value:0xc067f80498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.527479449s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp7} value=33.5 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp7} value=0 ]} {Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp8 Value:0xc067f80518} C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp8 Value:0xc067f80550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.5274848s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp8} value=0 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp8} value=0 ]} {Instance:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp9 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp9 Value:0xc067f805c8} C:{Var:C Labels:__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp9 Value:0xc067f80600}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.52748992s EvaluationString:[ var='B' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp9} value=0 ], [ var='C' labels={__name__=node_hwmon_temp_celsius, chip=platform_nct6775_2592, instance=192.168.1.138:9100, job=node, sensor=temp9} value=0 ]}]" duration=63.57497ms +logger=ngalert.state.manager user=682219 slug=mcb1 t=2024-05-29T13:44:15.538311843Z level=debug msg="State manager processing evaluation results" resultCount=3 +logger=ngalert.state.manager.persist user=538355 slug=flogic t=2024-05-29T13:44:15.537980183Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=35.443082ms +level=debug ts=2024-05-29T13:44:15.537576509Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=698963 slug=lemonade instance="app=underwriting-platform-events-worker, pod=underwriting-platform-events-worker-5658d54585-xbhkr" t=2024-05-29T13:44:15.537497049Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.537403529Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=78401 slug=ayadav6 t=2024-05-29T13:44:15.534889745Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=78401 slug=ayadav6 instance= t=2024-05-29T13:44:15.534030785Z level=debug msg="Setting next state" handler=resultError +level=debug ts=2024-05-29T13:44:15.536792824Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.536807244Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=332555 slug=omexomcs version=52 fingerprint=f1e71cbc343b969f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.536673669Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.536421343s EvaluationString:}]" duration=634.598249ms +level=debug ts=2024-05-29T13:44:15.536715709Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:15.536468305Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:15.536454075Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:15.536406819Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.53543564Z caller=remote_instance_store.go:51 user=527202 slug=lnrsusinsurancedev msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.535252642Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.535298711Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:15.535004572Z level=debug 
msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=170883 slug=datacontrol instance="datasource_uid=fKD2mywGk, ref_id=A" t=2024-05-29T13:44:15.53506726Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=170883 slug=datacontrol instance="datasource_uid=fKD2mywGk, ref_id=A" t=2024-05-29T13:44:15.535062952Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +Error parsing panelUID for alert annotationruleID308dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=109452 slug=deltarisk version=14 fingerprint=3402c6fe099f610d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.534893673Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.534320873s EvaluationString:}]" duration=128.610011ms +logger=ngalert.state.manager user=170883 slug=datacontrol instance="datasource_uid=fKD2mywGk, ref_id=A" t=2024-05-29T13:44:15.535039393Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=170883 slug=datacontrol instance="datasource_uid=fKD2mywGk, ref_id=A" t=2024-05-29T13:44:15.535014496Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=170883 slug=datacontrol instance="datasource_uid=fKD2mywGk, ref_id=A" t=2024-05-29T13:44:15.535001365Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:15.533844133Z caller=remote_instance_store.go:51 user=261837 slug=empowercloud msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.533527946Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.533453476Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.533360266Z caller=remote_instance_store.go:51 user=343704 slug=haendlerbund msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.533401783Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=849222 slug=franv2dev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.53146871Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=849222 slug=franv2dev version=1 fingerprint=44f4b8f738802764 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.531339628Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.53112675s EvaluationString:}]" duration=8.824161ms +logger=ngalert.state.manager.persist user=713299 slug=btcnonprod t=2024-05-29T13:44:15.529839092Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.076111ms +logger=ngalert.state.manager.persist user=713315 slug=mtbnonprod t=2024-05-29T13:44:15.529414825Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.529468696Z caller=remote_instance_store.go:51 user=713315 slug=mtbnonprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=713315 slug=mtbnonprod 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.529385674Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=713315 slug=mtbnonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.529222972Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=713315 slug=mtbnonprod t=2024-05-29T13:44:15.529204552Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.528894407Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.528882132Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.831019ms +logger=ngalert.state.manager.persist user=695885 slug=lululemonprod t=2024-05-29T13:44:15.528701313Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=695885 slug=lululemonprod t=2024-05-29T13:44:15.528628602Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.527809981Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=485988 slug=alfromentpro instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.527283578Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:59:10Z next_ends_at=2024-05-29T14:04:10Z +logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=ANY, Resource=/service-fees/{proxy+}, Stage=$default" t=2024-05-29T13:44:15.527141974Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.527006032Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.526827869Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=82372 slug=fout t=2024-05-29T13:44:15.526109029Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=82372 slug=fout instance= t=2024-05-29T13:44:15.526082912Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=82372 slug=fout t=2024-05-29T13:44:15.52598453Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.525727103Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.525220553Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.524477Z caller=remote_instance_store.go:51 user=82372 slug=fout msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=716519 slug=bradfordprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.524268887Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.523289555Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=713299 slug=btcnonprod t=2024-05-29T13:44:15.522759681Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=713299 
slug=btcnonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.522726201Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.522826313Z caller=remote_instance_store.go:51 user=713299 slug=btcnonprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=713299 slug=btcnonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.52271648Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.522499764Z caller=remote_instance_store.go:51 user=261837 slug=empowercloud msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.522535058Z caller=remote_instance_store.go:51 user=374423 slug=bitburst msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.522414257Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=374423 slug=bitburst t=2024-05-29T13:44:15.522286749Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.521527111Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.521457835Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.521404341Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=vSphere_Farm_Japan, vcenter_host_name=jpspesx02.brainlab.net" t=2024-05-29T13:44:15.521350543Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=vSphere_Farm_Japan, vcenter_host_name=jpspesx01.brainlab.net" t=2024-05-29T13:44:15.521325072Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=373502 slug=stakeandrelax t=2024-05-29T13:44:15.521238038Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.358114ms +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=vSphere_Farm_Israel, vcenter_host_name=ilspesx02.brainlab.net" t=2024-05-29T13:44:15.521199859Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=vSphere_Farm_HPC_Germany, vcenter_host_name=despesxhpc08.brainlab.net" t=2024-05-29T13:44:15.521144868Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=vSphere_Farm_HPC_Germany, vcenter_host_name=despesxhpc07.brainlab.net" t=2024-05-29T13:44:15.521114547Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, 
vcenter_cluster_name=vSphere_Farm_HPC_Germany, vcenter_host_name=despesxhpc04.brainlab.net" t=2024-05-29T13:44:15.521034364Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.521100136Z caller=remote_instance_store.go:51 user=527202 slug=lnrsusinsurancedev msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:15.521047374Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 +logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:15.520973163Z level=debug msg="Saving alert states done" count=20 max_state_save_concurrency=1 duration=509.845216ms +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=vSphere_Farm_HPC_Germany, vcenter_host_name=despesxhpc01.brainlab.net" t=2024-05-29T13:44:15.520966974Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.520987362Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=vSphere_Farm_Austria, vcenter_host_name=atspesx01.brainlab.net" t=2024-05-29T13:44:15.52086264Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.520899907Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=despesxml02, vcenter_host_name=despesxml02.brainlab.net" t=2024-05-29T13:44:15.5208065Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=762770 slug=brainlab instance="__name__=vcenter_host_cpu_utilization_percent, bl_gc_lbac_tenants=;ww-itcis_virtualization, vcenter_cluster_name=despesxgrid05, vcenter_host_name=despesxgrid05.brainlab.net" t=2024-05-29T13:44:15.520752958Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.520817136Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=186562 slug=defier t=2024-05-29T13:44:15.52075505Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=186562 slug=defier instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.520710228Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.520690003Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.520734661Z caller=remote_image_capturer.go:33 user=186562 slug=defier rule_org_id=1 rule_uid=iKyxEnUnk msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" +level=debug ts=2024-05-29T13:44:15.520042587Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.519958549Z 
caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.519290351Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.633267ms +level=debug ts=2024-05-29T13:44:15.519092021Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.518923411Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.518917419Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.51807352Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-logs, ref_id=Query" t=2024-05-29T13:44:15.518057087Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=trustami-cache.haendlerbund.de" t=2024-05-29T13:44:15.518101629Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-manager.haendlerbund.de" t=2024-05-29T13:44:15.517978904Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-manager.haendlerbund.de" t=2024-05-29T13:44:15.51796928Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.517860508Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.517766507Z caller=remote_instance_store.go:51 user=743579 slug=neotax msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=shopauskunft.de" t=2024-05-29T13:44:15.517703155Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.517796056Z caller=remote_instance_store.go:51 user=265692 slug=beekeeper msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rtvergleicher.haendlerbund.de" t=2024-05-29T13:44:15.517616388Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.517502447Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.517529592Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +level=info ts=2024-05-29T13:44:15.517508665Z caller=remote_alert_sender.go:94 user=114286 slug=enverus host=enverus-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.27.157:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bd5628ea-8ba9-4f55-b682-e9f191cd8d23 alerts=1 +level=info 
ts=2024-05-29T13:44:15.517472955Z caller=remote_alert_sender.go:94 user=114286 slug=enverus host=enverus-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.89.182:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bd5628ea-8ba9-4f55-b682-e9f191cd8d23 alerts=1 +level=debug ts=2024-05-29T13:44:15.517216651Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.517075358Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=44.583618ms +level=debug ts=2024-05-29T13:44:15.517090128Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketsupply.com" t=2024-05-29T13:44:15.516990237Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketplace.haendlerbund.de" t=2024-05-29T13:44:15.516926019Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.516745477Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=lta.haendlerbund.de" t=2024-05-29T13:44:15.51681501Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.516799418Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.516724088Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logistik-watchblog.de" t=2024-05-29T13:44:15.516707764Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legaltext-cache.haendlerbund.de" t=2024-05-29T13:44:15.516647325Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legal-connect.de" t=2024-05-29T13:44:15.51660167Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legal-connect.de" t=2024-05-29T13:44:15.516587851Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=leech.haendlerbund.de" t=2024-05-29T13:44:15.516535656Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund 
instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=karriere.haendlerbund.de" t=2024-05-29T13:44:15.516477595Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=karriere.haendlerbund.de" t=2024-05-29T13:44:15.516467535Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=info-service.haendlerbund.de" t=2024-05-29T13:44:15.516296902Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=haendlerbund.de" t=2024-05-29T13:44:15.51623886Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=haendlerbund.de" t=2024-05-29T13:44:15.5162279Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.516227491Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=datenschutz-weiterleitungen.haendlerbund.de" t=2024-05-29T13:44:15.516056716Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=crm.haendlerbund.de" t=2024-05-29T13:44:15.515983996Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=credit-check.haendlerbund.de" t=2024-05-29T13:44:15.515779792Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=cms.haendlerbund.de" t=2024-05-29T13:44:15.515669828Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.515645461Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-logs-tools.haendlerbund.de" t=2024-05-29T13:44:15.515521744Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=api-docs-tools.haendlerbund.de" t=2024-05-29T13:44:15.515445035Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.515447629Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" 
+ logger=ngalert.state.manager user=343704 slug=haendlerbund instance="__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=amazon-watchblog.de" t=2024-05-29T13:44:15.515383607Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.515275809Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.515237496Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=343704 slug=haendlerbund version=1 fingerprint=d79acb88c797342b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.514444245Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=adz-personal.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=adz-personal.de Value:0xc04b671c60} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=adz-personal.de Value:0xc04b671cb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511797656s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=adz-personal.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=adz-personal.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=amazon-watchblog.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=amazon-watchblog.de Value:0xc04b671d30} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=amazon-watchblog.de Value:0xc04b671d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511821178s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=amazon-watchblog.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=amazon-watchblog.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=api-docs-tools.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=api-docs-tools.haendlerbund.de Value:0xc04b671e00} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=api-docs-tools.haendlerbund.de Value:0xc04b671e48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511832138s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=api-docs-tools.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=api-docs-tools.haendlerbund.de} value=0 ]} 
{Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-logs-tools.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-logs-tools.haendlerbund.de Value:0xc04b671ed8} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-logs-tools.haendlerbund.de Value:0xc04b671f20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.5118428s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-logs-tools.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-logs-tools.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-user-management.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-user-management.haendlerbund.de Value:0xc04b671fa8} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-user-management.haendlerbund.de Value:0xc04b671fe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511850138s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-user-management.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=auth0-user-management.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=cms.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=cms.haendlerbund.de Value:0xc0119a21c0} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=cms.haendlerbund.de Value:0xc0119a22e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.51185823s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=cms.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=cms.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=credit-check.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=credit-check.haendlerbund.de Value:0xc0119a2390} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=credit-check.haendlerbund.de Value:0xc0119a2428}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511879533s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, 
cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=credit-check.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=credit-check.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=crm.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=crm.haendlerbund.de Value:0xc0119a2698} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=crm.haendlerbund.de Value:0xc0119a27c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511887467s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=crm.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=crm.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=datenschutz-weiterleitungen.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=datenschutz-weiterleitungen.haendlerbund.de Value:0xc0119a2a98} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=datenschutz-weiterleitungen.haendlerbund.de Value:0xc0119a29f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.51189564s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=datenschutz-weiterleitungen.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=datenschutz-weiterleitungen.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=events.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=events.haendlerbund.de Value:0xc0119a2c50} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=events.haendlerbund.de Value:0xc0119a2c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511903833s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=events.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=events.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=haendlerbund.de Value:0xc0119a2da0} C:{Var:C Labels:__name__=http_certificate_valid, 
cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=haendlerbund.de Value:0xc0119a2df8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511911299s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=info-service.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=info-service.haendlerbund.de Value:0xc0119a2eb8} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=info-service.haendlerbund.de Value:0xc0119a2f18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511919147s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=info-service.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=info-service.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=itb-recht.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=itb-recht.de Value:0xc0119a2ff0} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=itb-recht.de Value:0xc0119a3068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511926917s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=itb-recht.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=itb-recht.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=joomla-updates.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=joomla-updates.haendlerbund.de Value:0xc0119a3100} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=joomla-updates.haendlerbund.de Value:0xc0119a3170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511934565s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=joomla-updates.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=joomla-updates.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=karriere.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, 
cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=karriere.haendlerbund.de Value:0xc0119a3210} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=karriere.haendlerbund.de Value:0xc0119a3278}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511942617s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=karriere.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=karriere.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=leech.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=leech.haendlerbund.de Value:0xc0119a3420} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=leech.haendlerbund.de Value:0xc0119a3518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511954217s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=leech.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=leech.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legal-connect.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legal-connect.de Value:0xc0119a3738} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legal-connect.de Value:0xc0119a38a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511960593s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legal-connect.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legal-connect.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legaltext-cache.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legaltext-cache.haendlerbund.de Value:0xc0119a3b30} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legaltext-cache.haendlerbund.de Value:0xc0119a3b88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511966637s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legaltext-cache.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=legaltext-cache.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, 
cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logistik-watchblog.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logistik-watchblog.de Value:0xc0119a3cf8} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logistik-watchblog.de Value:0xc0119a3df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511972893s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logistik-watchblog.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logistik-watchblog.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logs.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logs.haendlerbund.de Value:0xc0119a3fc0} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logs.haendlerbund.de Value:0xc01a9f8038}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511981717s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logs.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=logs.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=lta.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=lta.haendlerbund.de Value:0xc01a9f8308} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=lta.haendlerbund.de Value:0xc01a9f8210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.511988692s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=lta.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=lta.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketplace.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketplace.haendlerbund.de Value:0xc01a9f8608} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketplace.haendlerbund.de Value:0xc01a9f8750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.51199528s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketplace.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, 
cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketplace.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketsupply.com State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketsupply.com Value:0xc01a9f8b58} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketsupply.com Value:0xc01a9f8a50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512001891s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketsupply.com} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=marketsupply.com} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mein.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mein.haendlerbund.de Value:0xc01a9f8da0} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mein.haendlerbund.de Value:0xc01a9f8e58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512010658s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mein.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mein.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mitglieder.hb-intern.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mitglieder.hb-intern.de Value:0xc01a9f9178} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mitglieder.hb-intern.de Value:0xc01a9f9088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512018303s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mitglieder.hb-intern.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=mitglieder.hb-intern.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=monitoring-tools.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=monitoring-tools.haendlerbund.de Value:0xc01a9f9430} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=monitoring-tools.haendlerbund.de Value:0xc01a9f9368}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512026125s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, 
cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=monitoring-tools.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=monitoring-tools.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=musterschreiben.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=musterschreiben.haendlerbund.de Value:0xc01a9f95e8} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=musterschreiben.haendlerbund.de Value:0xc01a9f96f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512034582s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=musterschreiben.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=musterschreiben.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=nexus-messe.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=nexus-messe.de Value:0xc01a9f9910} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=nexus-messe.de Value:0xc01a9f9a30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512042458s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=nexus-messe.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=nexus-messe.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=onlinehaendler-news.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=onlinehaendler-news.de Value:0xc01a9f9c90} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=onlinehaendler-news.de Value:0xc01a9f9b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512048894s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=onlinehaendler-news.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=onlinehaendler-news.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rba.shopauskunft.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rba.shopauskunft.de Value:0xc01a9f9f78} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rba.shopauskunft.de 
Value:0xc01a9f9e60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512068897s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rba.shopauskunft.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rba.shopauskunft.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=registry.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=registry.haendlerbund.de Value:0xc017cb8938} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=registry.haendlerbund.de Value:0xc05872c008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.51207787s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=registry.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=registry.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rtvergleicher.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rtvergleicher.haendlerbund.de Value:0xc055ab6050} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rtvergleicher.haendlerbund.de Value:0xc055ab6090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512092352s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rtvergleicher.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=rtvergleicher.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=shopauskunft.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=shopauskunft.de Value:0xc055ab6138} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=shopauskunft.de Value:0xc055ab6188}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.51210245s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=shopauskunft.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=shopauskunft.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=signatures.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=signatures.haendlerbund.de 
Value:0xc055ab6218} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=signatures.haendlerbund.de Value:0xc055ab6260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512109605s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=signatures.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=signatures.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=socialmediarechtstexte.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=socialmediarechtstexte.haendlerbund.de Value:0xc055ab62e0} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=socialmediarechtstexte.haendlerbund.de Value:0xc055ab6328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512118035s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=socialmediarechtstexte.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=socialmediarechtstexte.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-data.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-data.haendlerbund.de Value:0xc055ab63b0} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-data.haendlerbund.de Value:0xc055ab63f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512126591s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-data.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-data.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-manager.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-manager.haendlerbund.de Value:0xc055ab64b8} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-manager.haendlerbund.de Value:0xc055ab6478}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512134632s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-manager.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=swarm-manager.haendlerbund.de} value=0 ]} 
{Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=system-architect-tools.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=system-architect-tools.haendlerbund.de Value:0xc055ab6538} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=system-architect-tools.haendlerbund.de Value:0xc055ab6578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512142153s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=system-architect-tools.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=system-architect-tools.haendlerbund.de} value=0 ]} {Instance:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=trustami-cache.haendlerbund.de State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=trustami-cache.haendlerbund.de Value:0xc055ab65e8} C:{Var:C Labels:__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=trustami-cache.haendlerbund.de Value:0xc055ab6640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.512149512s EvaluationString:[ var='B' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=trustami-cache.haendlerbund.de} value=1 ], [ var='C' labels={__name__=http_certificate_valid, cluster=hb-productive-droplets, instance=pythia:2617, job=pythia, server=trustami-cache.haendlerbund.de} value=0 ]}]" duration=75.067969ms
+ level=debug ts=2024-05-29T13:44:15.51515165Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.515031275Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.51495968Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.514910675Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.514691447Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.514616818Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.514244131Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.514243639Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.51415349Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.51423205Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.51421349Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.514201468Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.513792535Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.513699884Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.512988025Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.513108831Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.388363ms
+ level=debug ts=2024-05-29T13:44:15.512107808Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.512354519Z caller=remote_instance_store.go:51 user=739013 slug=altoglobalsharing msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.512104005Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=739013 slug=altoglobalsharing t=2024-05-29T13:44:15.512289537Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:15.512051472Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.512029583Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ level=debug ts=2024-05-29T13:44:15.511968631Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=error ts=2024-05-29T13:44:15.511909035Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'C': data source not found"
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=79930f02a2944649 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.511945306Z level=error msg="Failed to evaluate rule" error="failed to build query 'C': data source not found" duration=5.317297ms
+ level=debug ts=2024-05-29T13:44:15.511915457Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.511845813Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.511856604Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.511749563Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.511707122Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=691103 slug=caetest t=2024-05-29T13:44:15.511610231Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.122395ms
+ level=debug ts=2024-05-29T13:44:15.51154784Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.511469343Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.511457218Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.511377685Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=454895bedeb1d391 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.511192601Z level=debug msg="Alert rule evaluated" results="[{Instance:QueueName=wire-transfer-consumer-live-dlq State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:QueueName=wire-transfer-consumer-live-dlq Value:0xc00d2c6680} C:{Var:C Labels:QueueName=wire-transfer-consumer-live-dlq Value:0xc00d2c6688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.510861196s EvaluationString:[ var='B' labels={QueueName=wire-transfer-consumer-live-dlq} value=0 ], [ var='C' labels={QueueName=wire-transfer-consumer-live-dlq} value=0 ]}]" duration=52.842282ms
+ level=debug ts=2024-05-29T13:44:15.51104074Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.510695863Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ level=debug ts=2024-05-29T13:44:15.510173343Z caller=remote_instance_store.go:51 user=481110 slug=g123 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:15.509880997Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=432323 slug=lithic instance="DBClusterIdentifier=prod-journal-processor-cluster" t=2024-05-29T13:44:15.50983821Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID2118dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=174675 slug=journalprod version=1 fingerprint=438c93ae7f9811bb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.509729443Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=uF2hBHyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.509385404s EvaluationString:}]" duration=13.2186ms
+ level=debug ts=2024-05-29T13:44:15.509716612Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:15.509642241Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.508889049Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.509082878Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.509086502Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.509020732Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.508985085Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=info ts=2024-05-29T13:44:15.508932589Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.83.87:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=acead728-1e4b-43fc-a0a3-1641f5a2a8aa alerts=1
+ logger=ngalert.state.manager.persist user=373502 slug=stakeandrelax t=2024-05-29T13:44:15.508877184Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=373502 slug=stakeandrelax instance= t=2024-05-29T13:44:15.508860284Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.508888683Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=373502 slug=stakeandrelax instance= t=2024-05-29T13:44:15.508833199Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.508849771Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:15.508844414Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.282272ms
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.508156343Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance= t=2024-05-29T13:44:15.507867758Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.507298054Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506984436Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506583057Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506579649Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506674746Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506724945Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.50659852Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506540431Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506543939Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.506554982Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.521526ms
+ level=debug ts=2024-05-29T13:44:15.506521858Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506508878Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.506509882Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.50608305Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.505292907Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:15.504720405Z caller=remote_alert_sender.go:94 user=377166 slug=tinmoth host=tinmoth-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.85.5:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=K6RVpqm4k alerts=1
+ level=debug ts=2024-05-29T13:44:15.504690082Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.504624666Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.504566453Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=acc7077c6a5ad2b3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.504386725Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.504061566s EvaluationString:}]" duration=13.060228ms
+ level=debug ts=2024-05-29T13:44:15.504451106Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.504198193Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.503928654Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.504075284Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.503935381Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.503695578Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.50344038Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.503377129Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=501236 slug=adevintapipes instance="datasource_uid=grafanacloud-usage, ref_id=A" t=2024-05-29T13:44:15.503119703Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=501236 slug=adevintapipes t=2024-05-29T13:44:15.50304354Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.502999267Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.502804116Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.502734825Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.502548806Z caller=remote_instance_store.go:51 user=691103 slug=caetest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=691103 slug=caetest instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.502468905Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=538355 slug=flogic version=25 fingerprint=708a4baa651d73c2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.502390059Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-06ca0103a3864fdd6, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-06ca0103a3864fdd6, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-06ca0103a3864fdd6, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-06ca0103a3864fdd6, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc01ba702f0} D:{Var:D Labels:__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-06ca0103a3864fdd6, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-06ca0103a3864fdd6, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc01ba703c0} E:{Var:E Labels:__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-06ca0103a3864fdd6, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-06ca0103a3864fdd6, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc01ba704b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.501998996s EvaluationString:[ var='A' labels={__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-06ca0103a3864fdd6, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-06ca0103a3864fdd6, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=288 ], [ var='D' labels={__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-06ca0103a3864fdd6, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-06ca0103a3864fdd6, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=288 ], [ var='E' labels={__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-06ca0103a3864fdd6, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-06ca0103a3864fdd6, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=0 ]}]" duration=13.31279ms
+ logger=ngalert.state.manager.persist user=119453 slug=edisonlearning t=2024-05-29T13:44:15.50245903Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.84385ms
+ level=debug
ts=2024-05-29T13:44:15.501461458Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PLAY-NTF_NOTIFICATION_INITIAL_DELIVERY_QUEUE-SQS" t=2024-05-29T13:44:15.501339329Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:15.501282798Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.50075326Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + Error parsing panelUID for alert annotationruleID955dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.500034864Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=69.360373ms + level=debug ts=2024-05-29T13:44:15.500170923Z caller=remote_instance_store.go:51 user=512575 slug=eigenlayer msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=512575 slug=eigenlayer version=67 fingerprint=4426658709a7d3c7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.499895372Z level=debug msg="Alert rule evaluated" results="[{Instance:fields.path=, fields.statusCode= State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:fields.path=, fields.statusCode= Value:0xc01cd5f368} C:{Var:C Labels:fields.path=, fields.statusCode= Value:0xc01cd5f268}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.499351728s EvaluationString:[ var='B' labels={fields.path=, fields.statusCode=} value=0 ], [ var='C' labels={fields.path=, fields.statusCode=} value=0 ]}]" duration=269.900607ms + level=debug ts=2024-05-29T13:44:15.4999901Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.499956322Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.499908368Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.499888386Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.499046837Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.499491166Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:15.498997364Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.499691381Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.499562435Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=trakya1" t=2024-05-29T13:44:15.49951502Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=sislietfal1" 
t=2024-05-29T13:44:15.499317119Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.499172018Z caller=remote_image_capturer.go:33 user=687021 slug=heviai rule_org_id=1 rule_uid=fdgfeclj7jqiod msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:15.498819071Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=okmeydani1" t=2024-05-29T13:44:15.499013217Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.498893399Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.498826306Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.498682558Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=hacettepe" t=2024-05-29T13:44:15.498679715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=guven" t=2024-05-29T13:44:15.498614715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=guven" t=2024-05-29T13:44:15.498608815Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=ftr" t=2024-05-29T13:44:15.498551815Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.498602334Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=cerrahpasa1" t=2024-05-29T13:44:15.498464814Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.498385316Z caller=remote_instance_store.go:51 user=261837 slug=empowercloud msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=bayindir" t=2024-05-29T13:44:15.498377814Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:15.498326361Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e4b81ed3-337e-48e1-a9c4-625d4e162dff alerts=1 + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=antalyaeah" t=2024-05-29T13:44:15.498296413Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=antalyaeah" t=2024-05-29T13:44:15.498285313Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:15.498206462Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.850966ms + level=debug ts=2024-05-29T13:44:15.498099949Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=687021 slug=heviai instance="instance=acibadem-1" t=2024-05-29T13:44:15.498162012Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=687021 slug=heviai 
instance="instance=acibadem-1" t=2024-05-29T13:44:15.498150612Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.498084333Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=811546 slug=fyld t=2024-05-29T13:44:15.49797824Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=811546 slug=fyld version=1 fingerprint=726dbde65d72afed attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.497881388Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=sitestream-dev-db-encrypted State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:DBInstanceIdentifier=sitestream-dev-db-encrypted Value:0xc06cb625d8} Freeable Memory:{Var:Freeable Memory Labels:DBInstanceIdentifier=sitestream-dev-db-encrypted Value:0xc06cb625d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.497380372s EvaluationString:[ var='C' labels={DBInstanceIdentifier=sitestream-dev-db-encrypted} value=0 ], [ var='Freeable Memory' labels={DBInstanceIdentifier=sitestream-dev-db-encrypted} value=5.272510464e+08 ]}]" duration=65.012667ms + level=debug ts=2024-05-29T13:44:15.49784519Z caller=remote_instance_store.go:51 user=146728 slug=dgc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=656284 slug=cencosudx t=2024-05-29T13:44:15.497625299Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.964483ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.497585953Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.828489ms + level=debug ts=2024-05-29T13:44:15.497163168Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.497052913Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.497091Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.497082091Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.496814219Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance="QueueName=ImportAccruals-dead-letter" t=2024-05-29T13:44:15.496741577Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.496124986Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=228733 slug=csmoney t=2024-05-29T13:44:15.495940568Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.77849ms + level=debug ts=2024-05-29T13:44:15.495900584Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.495821885Z caller=remote_instance_store.go:51 user=265692 slug=beekeeper msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.495796117Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.216429ms + logger=ngalert.state.manager user=696798 slug=mcv 
t=2024-05-29T13:44:15.495745162Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.495670822Z caller=remote_instance_store.go:51 user=841587 slug=tfxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.495099807Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.495034299Z caller=remote_alert_sender.go:94 user=18335 slug=semaphore host=semaphore-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.67.109:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=da4aa33e-5b18-44cb-8762-e2af1adc4bf9 alerts=1 + logger=ngalert.state.manager.persist user=715708 slug=ggiprod t=2024-05-29T13:44:15.494725673Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.494713654Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=715708 slug=ggiprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.494650732Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=715708 slug=ggiprod version=1 fingerprint=fa695a1c0c3a1788 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.49454624Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.494417167s EvaluationString:}]" duration=8.943843ms + level=debug ts=2024-05-29T13:44:15.494512723Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.494496573Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.494465504Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.494519315Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.49450661Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.494264337Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.494160878Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.493990085Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.782131ms + level=debug ts=2024-05-29T13:44:15.493877313Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:15.493885033Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.493863277Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:15.493823151Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=173730 slug=nikon instance= 
t=2024-05-29T13:44:15.493812397Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.493715155Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=707420 slug=pangealab t=2024-05-29T13:44:15.493329489Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=707420 slug=pangealab instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.493315499Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.493566341Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.49343069Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.493466543Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.49340104Z caller=remote_instance_store.go:51 user=707420 slug=pangealab msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.493424303Z caller=remote_instance_store.go:51 user=767797 slug=mgmresorts msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.493383259Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance="ClusterName=BETA-PLAY-EVENT-STREAMING, ServiceName=SYNC-WORKER-SERVICE" t=2024-05-29T13:44:15.493390098Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:15.493389492Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.493194094Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.493138685Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.493028211Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.493012454Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.492880349Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.492863434Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:15.492574238Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.107043ms + level=debug ts=2024-05-29T13:44:15.492124731Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.492563167Z caller=remote_instance_store.go:51 user=417450 slug=legitsecurity msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.492125668Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.492007137Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:15.491988103Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.491804208Z caller=remote_instance_store.go:51 user=923052 slug=magicairestricted msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=923052 slug=magicairestricted t=2024-05-29T13:44:15.491738956Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=923052 slug=magicairestricted instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.491721008Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=923052 slug=magicairestricted t=2024-05-29T13:44:15.491659625Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.491556794Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.491275273Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.491264726Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.491021309Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=319327 slug=cvi instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:15.490890039Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=319327 slug=cvi version=16 fingerprint=e8afab025107ceb5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.490798588Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.490467291s EvaluationString:}]" duration=21.065219ms + level=debug ts=2024-05-29T13:44:15.490771125Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.490512136Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.490119887Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.490275172Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.490119983Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.490031936Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=206439 slug=relaypro t=2024-05-29T13:44:15.490038189Z level=debug msg="Saving alert states" count=7 max_state_save_concurrency=1 + logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=qa, role=ibotdr_fdb" t=2024-05-29T13:44:15.489932371Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=qa, role=ibot_fdb" t=2024-05-29T13:44:15.489846472Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, 
environment=pro, role=ibot_fdb" t=2024-05-29T13:44:15.489636283Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.489532677Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:15.489494887Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:15.489477506Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.489453914Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=173730 slug=nikon version=5 fingerprint=e5a9451d38930b4d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.489388278Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.489076335s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=381.070692ms + logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:15.489276327Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The minimum process count on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment does not equal the expected value. This is abnormal.': error parsing template __alert_FDB - Process Count (differs from expected, email): template: __alert_FDB - Process Count (differs from expected, email):1: function \"role\" not defined" + level=debug ts=2024-05-29T13:44:15.489190676Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.48915808Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:15.488881418Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.881719ms + level=debug ts=2024-05-29T13:44:15.488640225Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.488660475Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.488626454Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:15.488484294Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.48843529Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=4694f20510b9ccc2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.488344807Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.48803023s EvaluationString:}]" duration=135.386051ms + level=debug ts=2024-05-29T13:44:15.488094135Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed" t=2024-05-29T13:44:15.488029567Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.487511265Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + Error parsing panelUID for alert annotationruleID304dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:15.48758058Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.62001ms + level=debug ts=2024-05-29T13:44:15.487513814Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.487476181Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:15.487427674Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.487434171Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.487441219Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle" t=2024-05-29T13:44:15.487423231Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:15.487379586Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines" t=2024-05-29T13:44:15.487319419Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.487191004Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="application=gateway-microservice" t=2024-05-29T13:44:15.487219909Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol" t=2024-05-29T13:44:15.487207947Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=140107 slug=jqiannian t=2024-05-29T13:44:15.486976558Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=140107 slug=jqiannian instance= t=2024-05-29T13:44:15.486963349Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z + level=debug ts=2024-05-29T13:44:15.486864421Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.486673669Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=379c916c3b243664 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.486604975Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.48632822s EvaluationString:}]" duration=315.546914ms + level=debug 
ts=2024-05-29T13:44:15.486658385Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=YGG" t=2024-05-29T13:44:15.486448507Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=XEM" t=2024-05-29T13:44:15.486236143Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=WOO" t=2024-05-29T13:44:15.486202233Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=20946 slug=extole instance="datasource_uid=000000001, ref_id=B" t=2024-05-29T13:44:15.486001271Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=USDT" t=2024-05-29T13:44:15.486013229Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=UNI" t=2024-05-29T13:44:15.485924637Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=UNFI" t=2024-05-29T13:44:15.485881186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=TRX" t=2024-05-29T13:44:15.485803425Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.485737408Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.485666183Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=TRU" t=2024-05-29T13:44:15.485590501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196413 slug=form3production instance="Region=eu-west-2, ServiceLimit=General Purpose SSD (gp2) volume storage (TiB), ServiceName=EBS" t=2024-05-29T13:44:15.485043734Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196413 slug=form3production instance="Region=eu-west-2, ServiceLimit=General Purpose SSD (gp2) volume storage (TiB), ServiceName=EBS" t=2024-05-29T13:44:15.485029604Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.484906234Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.484653997Z caller=remote_instance_store.go:51 user=119453 slug=edisonlearning msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=119453 slug=edisonlearning t=2024-05-29T13:44:15.484612218Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=119453 slug=edisonlearning instance="datasource_uid=m0NHOnzMk, ref_id=A" t=2024-05-29T13:44:15.48458847Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.484525464Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.484611404Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.484524057Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.484384059Z 
caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=119453 slug=edisonlearning t=2024-05-29T13:44:15.484558223Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=119453 slug=edisonlearning version=1 fingerprint=2b46bca820543348 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.484474663Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=m0NHOnzMk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.484122793s EvaluationString:}]" duration=902.488422ms + level=debug ts=2024-05-29T13:44:15.484327965Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.48384466Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:15.48372487Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.483702316Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=635771 slug=sharedservices instance="cluster=eng-eks-dev" t=2024-05-29T13:44:15.483676109Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=656284 slug=cencosudx t=2024-05-29T13:44:15.483655226Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.483639476Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=656284 slug=cencosudx t=2024-05-29T13:44:15.483605335Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:15.483562174Z caller=grafana.go:247 user=482906 slug=wavelo msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=20 alerts=0 + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SXP" t=2024-05-29T13:44:15.483413457Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SUI" t=2024-05-29T13:44:15.483381847Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=STX" t=2024-05-29T13:44:15.483362376Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.483360227Z caller=remote_instance_store.go:51 user=377166 slug=tinmoth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=STX" t=2024-05-29T13:44:15.483355606Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SSV" t=2024-05-29T13:44:15.483336186Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:15.483327311Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=114286 slug=enverus instance= t=2024-05-29T13:44:15.483318408Z level=debug msg="Keeping state" state=NoData 
previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SNX" t=2024-05-29T13:44:15.483266683Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.483095171Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SKL" t=2024-05-29T13:44:15.483245203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SKL" t=2024-05-29T13:44:15.483233374Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SHIB" t=2024-05-29T13:44:15.483213353Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SHIB" t=2024-05-29T13:44:15.483201713Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SAND" t=2024-05-29T13:44:15.483151502Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=SAND" t=2024-05-29T13:44:15.483145582Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.483114323Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=RUNE" t=2024-05-29T13:44:15.483091791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=REEF" t=2024-05-29T13:44:15.482984569Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=RDNT" t=2024-05-29T13:44:15.482958258Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=POWR" t=2024-05-29T13:44:15.482900137Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=POWR" t=2024-05-29T13:44:15.482892417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=PHB" t=2024-05-29T13:44:15.482840686Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=PERP" t=2024-05-29T13:44:15.482809915Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=PEOPLE" t=2024-05-29T13:44:15.482794654Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.48273006Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=OM" t=2024-05-29T13:44:15.482660622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=OCEAN" t=2024-05-29T13:44:15.482608901Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=NEAR" t=2024-05-29T13:44:15.48259766Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=MKR" t=2024-05-29T13:44:15.48256892Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.482539231Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.343872ms + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=MINA" t=2024-05-29T13:44:15.482536069Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=MASK" t=2024-05-29T13:44:15.482446658Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=MANA" t=2024-05-29T13:44:15.482427507Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=MANA" t=2024-05-29T13:44:15.482416617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LUNC" t=2024-05-29T13:44:15.482385276Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LUNC" t=2024-05-29T13:44:15.482373046Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.482336915Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LTC" t=2024-05-29T13:44:15.482353246Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.482337277Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.677722ms + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LTC" t=2024-05-29T13:44:15.482340956Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LPT" t=2024-05-29T13:44:15.482311375Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=377166 slug=tinmoth instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.482247065Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LINK" t=2024-05-29T13:44:15.482277094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LINK" t=2024-05-29T13:44:15.482272234Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=377166 slug=tinmoth t=2024-05-29T13:44:15.482210867Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=LDO" t=2024-05-29T13:44:15.482202263Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=KEY" t=2024-05-29T13:44:15.482178462Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.482186533Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=377166 slug=tinmoth version=2 fingerprint=abca0d2e7a873d0b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.482099319Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.481707041s EvaluationString:}]" duration=10.4509ms + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=JASMY" t=2024-05-29T13:44:15.482145212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=IOTX" t=2024-05-29T13:44:15.482123181Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=IOTA" t=2024-05-29T13:44:15.482112571Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=INJ" t=2024-05-29T13:44:15.482080721Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=IMX" t=2024-05-29T13:44:15.48206858Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=IMX" t=2024-05-29T13:44:15.48206063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.482028226Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=HIGH" t=2024-05-29T13:44:15.482003819Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=HIFI" t=2024-05-29T13:44:15.481980869Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=HBAR" t=2024-05-29T13:44:15.481954457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=GRT" t=2024-05-29T13:44:15.481940577Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=GMT" t=2024-05-29T13:44:15.481901477Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=GAL" t=2024-05-29T13:44:15.481841066Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=FLOW" t=2024-05-29T13:44:15.481779555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=FLOKI" t=2024-05-29T13:44:15.481755204Z level=warn msg="Failed to take an image" dashboard=xc-trades panel=15 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:15.481670096Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=19.995082ms + level=debug ts=2024-05-29T13:44:15.481581918Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.481558204Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.48153623Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.481461797Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:15.481490874Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:15.481480619Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.481383712Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.481225074Z caller=remote_instance_store.go:51 user=228733 slug=csmoney msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:15.481152258Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=114286 slug=enverus instance= t=2024-05-29T13:44:15.481136813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=stWgXyV7z, ref_id=A" t=2024-05-29T13:44:15.481112745Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=228733 slug=csmoney t=2024-05-29T13:44:15.481088173Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=228733 slug=csmoney version=22 fingerprint=c949995cbef5b658 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.48098151Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=stWgXyV7z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.480531451s EvaluationString:}]" duration=72.40484ms + logger=ngalert.state.manager.persist user=277807 slug=info96f8 t=2024-05-29T13:44:15.480880384Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.173663ms + level=debug ts=2024-05-29T13:44:15.480918428Z caller=remote_instance_store.go:51 user=380446 slug=antstream msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=380446 slug=antstream instance= t=2024-05-29T13:44:15.48086331Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.480890376Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.480812806Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.480765184Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.480703181Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.480690923Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=732767 slug=rackgenius instance="instance=cwrkp, job=integrations/node_exporter" t=2024-05-29T13:44:15.480704693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=732767 slug=rackgenius instance="instance=cwrkp, job=integrations/node_exporter" t=2024-05-29T13:44:15.480693103Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=EUR" t=2024-05-29T13:44:15.480508439Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ETH" t=2024-05-29T13:44:15.480483719Z level=debug msg="Setting next state" handler=resultNormal 
+ level=debug ts=2024-05-29T13:44:15.480505591Z caller=remote_instance_store.go:51 user=295631 slug=dapvizor msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=295631 slug=dapvizor t=2024-05-29T13:44:15.480446714Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=EOS" t=2024-05-29T13:44:15.480448888Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=EGLD" t=2024-05-29T13:44:15.480417856Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=EGLD" t=2024-05-29T13:44:15.480408837Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=DYDX" t=2024-05-29T13:44:15.480396976Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=DUSK" t=2024-05-29T13:44:15.480380677Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=DOT" t=2024-05-29T13:44:15.480342686Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=295631 slug=dapvizor t=2024-05-29T13:44:15.480338325Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.480255098Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=CKB" t=2024-05-29T13:44:15.480169262Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=CKB" t=2024-05-29T13:44:15.480163602Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=CHR" t=2024-05-29T13:44:15.480118781Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=CFX" t=2024-05-29T13:44:15.480083871Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=C98" t=2024-05-29T13:44:15.480040279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=BTC" t=2024-05-29T13:44:15.480004498Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.480024609Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=BNB" t=2024-05-29T13:44:15.479957378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=BLZ" t=2024-05-29T13:44:15.479917387Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.479881338Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=BICO" t=2024-05-29T13:44:15.479890896Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.479828321Z caller=remote_instance_store.go:51 user=442934 slug=arqit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod 
instance="asset=BEL" t=2024-05-29T13:44:15.479875186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=BEL" t=2024-05-29T13:44:15.479864526Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=BAKE" t=2024-05-29T13:44:15.479802175Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.479810502Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ATOM" t=2024-05-29T13:44:15.479741623Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.479728391Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ASTR" t=2024-05-29T13:44:15.479699193Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ARPA" t=2024-05-29T13:44:15.479625441Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=729654 slug=bmsmonitoring t=2024-05-29T13:44:15.479602591Z level=warn msg="Tick dropped because alert rule evaluation is too slow" rule_uid=e875b3a9-c454-4496-b216-51794adb2a76 org_id=1 time=2024-05-29T13:43:10Z + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ARKM" t=2024-05-29T13:44:15.479613341Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ARKM" t=2024-05-29T13:44:15.479605101Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ARB" t=2024-05-29T13:44:15.47958506Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=AR" t=2024-05-29T13:44:15.47957357Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=AR" t=2024-05-29T13:44:15.47956623Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=APT" t=2024-05-29T13:44:15.4795542Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=APT" t=2024-05-29T13:44:15.47954999Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ALGO" t=2024-05-29T13:44:15.479475718Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=AGIX" t=2024-05-29T13:44:15.479449528Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=841587 slug=tfxprod instance="asset=ACH" t=2024-05-29T13:44:15.479388587Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=841587 slug=tfxprod version=2 fingerprint=dd458fad226a8c2f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.47859737Z level=debug msg="Alert rule evaluated" results="[{Instance:asset=1INCH State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=1INCH Value:0xc03219a590} C:{Var:C Labels:asset=1INCH Value:0xc03219a5a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475106938s EvaluationString:[ var='B' 
labels={asset=1INCH} value=0.0333693725 ], [ var='C' labels={asset=1INCH} value=0 ]} {Instance:asset=AAVE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=AAVE Value:0xc03219a5d0} C:{Var:C Labels:asset=AAVE Value:0xc03219a5e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475128038s EvaluationString:[ var='B' labels={asset=AAVE} value=0.000826935914 ], [ var='C' labels={asset=AAVE} value=0 ]} {Instance:asset=ACH State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ACH Value:0xc03219a608} C:{Var:C Labels:asset=ACH Value:0xc03219a618}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475136318s EvaluationString:[ var='B' labels={asset=ACH} value=133.6209787044 ], [ var='C' labels={asset=ACH} value=0 ]} {Instance:asset=ADA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ADA Value:0xc03219a638} C:{Var:C Labels:asset=ADA Value:0xc03219a648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475142688s EvaluationString:[ var='B' labels={asset=ADA} value=0 ], [ var='C' labels={asset=ADA} value=0 ]} {Instance:asset=AGIX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=AGIX Value:0xc03219a670} C:{Var:C Labels:asset=AGIX Value:0xc03219a688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475149719s EvaluationString:[ var='B' labels={asset=AGIX} value=1.0637579e-05 ], [ var='C' labels={asset=AGIX} value=0 ]} {Instance:asset=ALGO State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ALGO Value:0xc03219a6b0} C:{Var:C Labels:asset=ALGO Value:0xc03219a6c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475158849s EvaluationString:[ var='B' labels={asset=ALGO} value=0 ], [ var='C' labels={asset=ALGO} value=0 ]} {Instance:asset=ALPHA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ALPHA Value:0xc03219a6f0} C:{Var:C Labels:asset=ALPHA Value:0xc03219a708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475165169s EvaluationString:[ var='B' labels={asset=ALPHA} value=244.0647 ], [ var='C' labels={asset=ALPHA} value=0 ]} {Instance:asset=ANKR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ANKR Value:0xc03219a730} C:{Var:C Labels:asset=ANKR Value:0xc03219a748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475173539s EvaluationString:[ var='B' labels={asset=ANKR} value=0 ], [ var='C' labels={asset=ANKR} value=0 ]} {Instance:asset=APE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=APE Value:0xc03219a768} C:{Var:C Labels:asset=APE Value:0xc03219a778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475179459s EvaluationString:[ var='B' labels={asset=APE} value=3.913311e-06 ], [ var='C' labels={asset=APE} value=0 ]} {Instance:asset=APT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=APT Value:0xc03219a798} C:{Var:C Labels:asset=APT Value:0xc03219a7a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475186309s EvaluationString:[ var='B' labels={asset=APT} value=0.000510663974 ], [ var='C' labels={asset=APT} value=0 ]} {Instance:asset=AR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=AR Value:0xc03219a7d8} C:{Var:C Labels:asset=AR Value:0xc03219a7c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475192579s EvaluationString:[ var='B' labels={asset=AR} value=0.30414088775 ], [ var='C' labels={asset=AR} value=0 ]} {Instance:asset=ARB State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:asset=ARB Value:0xc03219a7f8} C:{Var:C Labels:asset=ARB Value:0xc03219a808}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475197409s EvaluationString:[ var='B' labels={asset=ARB} value=5.44204e-05 ], [ var='C' labels={asset=ARB} value=0 ]} {Instance:asset=ARKM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ARKM Value:0xc03219a830} C:{Var:C Labels:asset=ARKM Value:0xc03219a848}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475202349s EvaluationString:[ var='B' labels={asset=ARKM} value=0.20050016 ], [ var='C' labels={asset=ARKM} value=0 ]} {Instance:asset=ARPA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ARPA Value:0xc03219a870} C:{Var:C Labels:asset=ARPA Value:0xc03219a888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475207639s EvaluationString:[ var='B' labels={asset=ARPA} value=0 ], [ var='C' labels={asset=ARPA} value=0 ]} {Instance:asset=ASTR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ASTR Value:0xc03219a8b0} C:{Var:C Labels:asset=ASTR Value:0xc03219a8c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475212239s EvaluationString:[ var='B' labels={asset=ASTR} value=0.0056631375 ], [ var='C' labels={asset=ASTR} value=0 ]} {Instance:asset=ATA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ATA Value:0xc03219a8f8} C:{Var:C Labels:asset=ATA Value:0xc03219a8e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475217239s EvaluationString:[ var='B' labels={asset=ATA} value=0 ], [ var='C' labels={asset=ATA} value=0 ]} {Instance:asset=ATOM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ATOM Value:0xc03219a920} C:{Var:C Labels:asset=ATOM Value:0xc03219a938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475221859s EvaluationString:[ var='B' labels={asset=ATOM} value=0.006277914921 ], [ var='C' labels={asset=ATOM} value=0 ]} {Instance:asset=AVAX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=AVAX Value:0xc03219a960} C:{Var:C Labels:asset=AVAX Value:0xc03219a978}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475226929s EvaluationString:[ var='B' labels={asset=AVAX} value=0.002235009647 ], [ var='C' labels={asset=AVAX} value=0 ]} {Instance:asset=BADGER State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BADGER Value:0xc03219a9c0} C:{Var:C Labels:asset=BADGER Value:0xc03219a9a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475232009s EvaluationString:[ var='B' labels={asset=BADGER} value=0.0143184525 ], [ var='C' labels={asset=BADGER} value=0 ]} {Instance:asset=BAKE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BAKE Value:0xc03219ac70} C:{Var:C Labels:asset=BAKE Value:0xc03219abb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475238409s EvaluationString:[ var='B' labels={asset=BAKE} value=92.4401372832 ], [ var='C' labels={asset=BAKE} value=0 ]} {Instance:asset=BCH State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BCH Value:0xc03219aca0} C:{Var:C Labels:asset=BCH Value:0xc03219ac90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475244209s EvaluationString:[ var='B' labels={asset=BCH} value=0 ], [ var='C' labels={asset=BCH} value=0 ]} {Instance:asset=BEL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BEL Value:0xc03219acc0} C:{Var:C Labels:asset=BEL Value:0xc03219acd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.475248839s EvaluationString:[ var='B' labels={asset=BEL} value=0 ], [ var='C' labels={asset=BEL} value=0 ]} {Instance:asset=BICO State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BICO Value:0xc03219acf8} C:{Var:C Labels:asset=BICO Value:0xc03219ad10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47525334s EvaluationString:[ var='B' labels={asset=BICO} value=34.3250735 ], [ var='C' labels={asset=BICO} value=0 ]} {Instance:asset=BLZ State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BLZ Value:0xc03219ad40} C:{Var:C Labels:asset=BLZ Value:0xc03219ad30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47525938s EvaluationString:[ var='B' labels={asset=BLZ} value=0 ], [ var='C' labels={asset=BLZ} value=0 ]} {Instance:asset=BNB State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BNB Value:0xc03219ad60} C:{Var:C Labels:asset=BNB Value:0xc03219ad70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47526406s EvaluationString:[ var='B' labels={asset=BNB} value=0 ], [ var='C' labels={asset=BNB} value=0 ]} {Instance:asset=BNX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BNX Value:0xc03219ad90} C:{Var:C Labels:asset=BNX Value:0xc03219ada0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.4752686s EvaluationString:[ var='B' labels={asset=BNX} value=0 ], [ var='C' labels={asset=BNX} value=0 ]} {Instance:asset=BTC State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=BTC Value:0xc03219adc0} C:{Var:C Labels:asset=BTC Value:0xc03219add0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47527518s EvaluationString:[ var='B' labels={asset=BTC} value=0.0040766373 ], [ var='C' labels={asset=BTC} value=0 ]} {Instance:asset=C98 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=C98 Value:0xc03219adf0} C:{Var:C Labels:asset=C98 Value:0xc03219ae00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47528034s EvaluationString:[ var='B' labels={asset=C98} value=1.4239165e-05 ], [ var='C' labels={asset=C98} value=0 ]} {Instance:asset=CELO State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=CELO Value:0xc03219ae28} C:{Var:C Labels:asset=CELO Value:0xc03219ae40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47528519s EvaluationString:[ var='B' labels={asset=CELO} value=46.973869334 ], [ var='C' labels={asset=CELO} value=0 ]} {Instance:asset=CFX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=CFX Value:0xc03219ae70} C:{Var:C Labels:asset=CFX Value:0xc03219ae60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47529027s EvaluationString:[ var='B' labels={asset=CFX} value=35.139857922 ], [ var='C' labels={asset=CFX} value=0 ]} {Instance:asset=CHR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=CHR Value:0xc03219ae90} C:{Var:C Labels:asset=CHR Value:0xc03219aea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47529519s EvaluationString:[ var='B' labels={asset=CHR} value=0.23885307 ], [ var='C' labels={asset=CHR} value=0 ]} {Instance:asset=CHZ State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=CHZ Value:0xc03219aec0} C:{Var:C Labels:asset=CHZ Value:0xc03219aed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475301331s EvaluationString:[ var='B' labels={asset=CHZ} value=4.964749e-06 ], [ var='C' labels={asset=CHZ} value=0 ]} {Instance:asset=CKB State:Normal Error: Results:map[] 
Values:map[B:{Var:B Labels:asset=CKB Value:0xc03219af00} C:{Var:C Labels:asset=CKB Value:0xc03219aef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475306401s EvaluationString:[ var='B' labels={asset=CKB} value=0.00124458654 ], [ var='C' labels={asset=CKB} value=0 ]} {Instance:asset=COMP State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=COMP Value:0xc03219afc0} C:{Var:C Labels:asset=COMP Value:0xc03219afa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475311441s EvaluationString:[ var='B' labels={asset=COMP} value=49.60352682 ], [ var='C' labels={asset=COMP} value=0 ]} {Instance:asset=COTI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=COTI Value:0xc03219afe8} C:{Var:C Labels:asset=COTI Value:0xc03219b000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475316641s EvaluationString:[ var='B' labels={asset=COTI} value=0 ], [ var='C' labels={asset=COTI} value=0 ]} {Instance:asset=CRV State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=CRV Value:0xc03219b020} C:{Var:C Labels:asset=CRV Value:0xc03219b030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475321201s EvaluationString:[ var='B' labels={asset=CRV} value=9.552672e-06 ], [ var='C' labels={asset=CRV} value=0 ]} {Instance:asset=CYBER State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=CYBER Value:0xc03219b060} C:{Var:C Labels:asset=CYBER Value:0xc03219b078}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475325941s EvaluationString:[ var='B' labels={asset=CYBER} value=0.0760829535 ], [ var='C' labels={asset=CYBER} value=0 ]} {Instance:asset=DOGE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=DOGE Value:0xc03219b0b8} C:{Var:C Labels:asset=DOGE Value:0xc03219b0a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475331141s EvaluationString:[ var='B' labels={asset=DOGE} value=0.000319971163 ], [ var='C' labels={asset=DOGE} value=0 ]} {Instance:asset=DOT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=DOT Value:0xc03219b0e8} C:{Var:C Labels:asset=DOT Value:0xc03219b0d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475336361s EvaluationString:[ var='B' labels={asset=DOT} value=0.000389399646 ], [ var='C' labels={asset=DOT} value=0 ]} {Instance:asset=DUSK State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=DUSK Value:0xc03219b110} C:{Var:C Labels:asset=DUSK Value:0xc03219b128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475341381s EvaluationString:[ var='B' labels={asset=DUSK} value=0.194790349 ], [ var='C' labels={asset=DUSK} value=0 ]} {Instance:asset=DYDX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=DYDX Value:0xc03219b150} C:{Var:C Labels:asset=DYDX Value:0xc03219b168}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475348151s EvaluationString:[ var='B' labels={asset=DYDX} value=0.0143096195 ], [ var='C' labels={asset=DYDX} value=0 ]} {Instance:asset=EGLD State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=EGLD Value:0xc03219b190} C:{Var:C Labels:asset=EGLD Value:0xc03219b1a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475354971s EvaluationString:[ var='B' labels={asset=EGLD} value=0.204962023 ], [ var='C' labels={asset=EGLD} value=0 ]} {Instance:asset=ENS State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ENS Value:0xc03219b1c8} C:{Var:C Labels:asset=ENS Value:0xc03219b1d8}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475361732s EvaluationString:[ var='B' labels={asset=ENS} value=0 ], [ var='C' labels={asset=ENS} value=0 ]} {Instance:asset=EOS State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=EOS Value:0xc03219b1f8} C:{Var:C Labels:asset=EOS Value:0xc03219b208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475372532s EvaluationString:[ var='B' labels={asset=EOS} value=9.936158e-06 ], [ var='C' labels={asset=EOS} value=0 ]} {Instance:asset=ETC State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ETC Value:0xc03219b228} C:{Var:C Labels:asset=ETC Value:0xc03219b2c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475378442s EvaluationString:[ var='B' labels={asset=ETC} value=0.001612149419 ], [ var='C' labels={asset=ETC} value=0 ]} {Instance:asset=ETH State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ETH Value:0xc03219b2e8} C:{Var:C Labels:asset=ETH Value:0xc03219b308}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475384002s EvaluationString:[ var='B' labels={asset=ETH} value=0.0028384032 ], [ var='C' labels={asset=ETH} value=0 ]} {Instance:asset=EUR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=EUR Value:0xc03219b328} C:{Var:C Labels:asset=EUR Value:0xc03219b338}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475389142s EvaluationString:[ var='B' labels={asset=EUR} value=1.692522e-06 ], [ var='C' labels={asset=EUR} value=0 ]} {Instance:asset=FDUSD State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=FDUSD Value:0xc03219b378} C:{Var:C Labels:asset=FDUSD Value:0xc03219b360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475394162s EvaluationString:[ var='B' labels={asset=FDUSD} value=0.054620662104 ], [ var='C' labels={asset=FDUSD} value=0 ]} {Instance:asset=FET State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=FET Value:0xc03219b3b8} C:{Var:C Labels:asset=FET Value:0xc03219b3a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475399322s EvaluationString:[ var='B' labels={asset=FET} value=2.9830892e-05 ], [ var='C' labels={asset=FET} value=0 ]} {Instance:asset=FIL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=FIL Value:0xc03219b3e8} C:{Var:C Labels:asset=FIL Value:0xc03219b3d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475404412s EvaluationString:[ var='B' labels={asset=FIL} value=0.000560900595 ], [ var='C' labels={asset=FIL} value=0 ]} {Instance:asset=FLOKI State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:asset=FLOKI Value:0xc03219b418} C:{Var:C Labels:asset=FLOKI Value:0xc03219b430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475410932s EvaluationString:[ var='B' labels={asset=FLOKI} value=3322.48548494205 ], [ var='C' labels={asset=FLOKI} value=1 ]} {Instance:asset=FLOW State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=FLOW Value:0xc03219b458} C:{Var:C Labels:asset=FLOW Value:0xc03219b470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475418022s EvaluationString:[ var='B' labels={asset=FLOW} value=0.0063125001 ], [ var='C' labels={asset=FLOW} value=0 ]} {Instance:asset=FRONT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=FRONT Value:0xc03219b618} C:{Var:C Labels:asset=FRONT Value:0xc03219b520}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475424932s EvaluationString:[ var='B' labels={asset=FRONT} 
value=0 ], [ var='C' labels={asset=FRONT} value=0 ]} {Instance:asset=FTM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=FTM Value:0xc03219b638} C:{Var:C Labels:asset=FTM Value:0xc03219b648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475431662s EvaluationString:[ var='B' labels={asset=FTM} value=0 ], [ var='C' labels={asset=FTM} value=0 ]} {Instance:asset=GAL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=GAL Value:0xc03219b668} C:{Var:C Labels:asset=GAL Value:0xc03219b678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475438592s EvaluationString:[ var='B' labels={asset=GAL} value=0.00319608753 ], [ var='C' labels={asset=GAL} value=0 ]} {Instance:asset=GALA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=GALA Value:0xc03219b6a0} C:{Var:C Labels:asset=GALA Value:0xc03219b6b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475444932s EvaluationString:[ var='B' labels={asset=GALA} value=3.991521e-06 ], [ var='C' labels={asset=GALA} value=0 ]} {Instance:asset=GAS State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=GAS Value:0xc03219b6d8} C:{Var:C Labels:asset=GAS Value:0xc03219b6e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475450792s EvaluationString:[ var='B' labels={asset=GAS} value=0.4714255 ], [ var='C' labels={asset=GAS} value=0 ]} {Instance:asset=GMT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=GMT Value:0xc03219b708} C:{Var:C Labels:asset=GMT Value:0xc03219b718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475455852s EvaluationString:[ var='B' labels={asset=GMT} value=2.1394626e-05 ], [ var='C' labels={asset=GMT} value=0 ]} {Instance:asset=GMX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=GMX Value:0xc03219b738} C:{Var:C Labels:asset=GMX Value:0xc03219b748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475461243s EvaluationString:[ var='B' labels={asset=GMX} value=81.861055 ], [ var='C' labels={asset=GMX} value=0 ]} {Instance:asset=GRT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=GRT Value:0xc03219b778} C:{Var:C Labels:asset=GRT Value:0xc03219b768}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475466423s EvaluationString:[ var='B' labels={asset=GRT} value=0.036183279 ], [ var='C' labels={asset=GRT} value=0 ]} {Instance:asset=HBAR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=HBAR Value:0xc03219b7a0} C:{Var:C Labels:asset=HBAR Value:0xc03219b7b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475471263s EvaluationString:[ var='B' labels={asset=HBAR} value=279.294753265 ], [ var='C' labels={asset=HBAR} value=0 ]} {Instance:asset=HIFI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=HIFI Value:0xc03219b7e0} C:{Var:C Labels:asset=HIFI Value:0xc03219b7f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475477203s EvaluationString:[ var='B' labels={asset=HIFI} value=0 ], [ var='C' labels={asset=HIFI} value=0 ]} {Instance:asset=HIGH State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=HIGH Value:0xc03219b820} C:{Var:C Labels:asset=HIGH Value:0xc03219b838}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475481923s EvaluationString:[ var='B' labels={asset=HIGH} value=0 ], [ var='C' labels={asset=HIGH} value=0 ]} {Instance:asset=ICP State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ICP Value:0xc03219b858} 
C:{Var:C Labels:asset=ICP Value:0xc03219b868}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475486633s EvaluationString:[ var='B' labels={asset=ICP} value=0.00026748123 ], [ var='C' labels={asset=ICP} value=0 ]} {Instance:asset=ID State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ID Value:0xc03219b898} C:{Var:C Labels:asset=ID Value:0xc03219b888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475491583s EvaluationString:[ var='B' labels={asset=ID} value=110.6332 ], [ var='C' labels={asset=ID} value=0 ]} {Instance:asset=IMX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=IMX Value:0xc03219b8c8} C:{Var:C Labels:asset=IMX Value:0xc03219b8b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475496473s EvaluationString:[ var='B' labels={asset=IMX} value=0.000182844134 ], [ var='C' labels={asset=IMX} value=0 ]} {Instance:asset=INJ State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=INJ Value:0xc03219b908} C:{Var:C Labels:asset=INJ Value:0xc03219b8f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475501543s EvaluationString:[ var='B' labels={asset=INJ} value=0.000183062832 ], [ var='C' labels={asset=INJ} value=0 ]} {Instance:asset=IOTA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=IOTA Value:0xc03219b930} C:{Var:C Labels:asset=IOTA Value:0xc03219b948}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475506763s EvaluationString:[ var='B' labels={asset=IOTA} value=0.13983034 ], [ var='C' labels={asset=IOTA} value=0 ]} {Instance:asset=IOTX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=IOTX Value:0xc03219b988} C:{Var:C Labels:asset=IOTX Value:0xc03219b970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475512443s EvaluationString:[ var='B' labels={asset=IOTX} value=42.30249984 ], [ var='C' labels={asset=IOTX} value=0 ]} {Instance:asset=JASMY State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=JASMY Value:0xc03219b9c8} C:{Var:C Labels:asset=JASMY Value:0xc03219b9b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475517703s EvaluationString:[ var='B' labels={asset=JASMY} value=182.61169125759 ], [ var='C' labels={asset=JASMY} value=0 ]} {Instance:asset=KAVA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=KAVA Value:0xc03219b9f0} C:{Var:C Labels:asset=KAVA Value:0xc03219baa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475523403s EvaluationString:[ var='B' labels={asset=KAVA} value=0.0294320209 ], [ var='C' labels={asset=KAVA} value=0 ]} {Instance:asset=KEY State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=KEY Value:0xc03219bac8} C:{Var:C Labels:asset=KEY Value:0xc03219bad8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475530073s EvaluationString:[ var='B' labels={asset=KEY} value=0.00442906415 ], [ var='C' labels={asset=KEY} value=0 ]} {Instance:asset=LDO State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LDO Value:0xc03219baf8} C:{Var:C Labels:asset=LDO Value:0xc03219bb08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475536783s EvaluationString:[ var='B' labels={asset=LDO} value=8.6421582e-05 ], [ var='C' labels={asset=LDO} value=0 ]} {Instance:asset=LEVER State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LEVER Value:0xc03219bbf0} C:{Var:C Labels:asset=LEVER Value:0xc03219bc08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475545114s 
EvaluationString:[ var='B' labels={asset=LEVER} value=0.00058608652 ], [ var='C' labels={asset=LEVER} value=0 ]} {Instance:asset=LINA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LINA Value:0xc03219bc30} C:{Var:C Labels:asset=LINA Value:0xc03219bc48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475552994s EvaluationString:[ var='B' labels={asset=LINA} value=1.3144377e-05 ], [ var='C' labels={asset=LINA} value=0 ]} {Instance:asset=LINK State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LINK Value:0xc03219bc88} C:{Var:C Labels:asset=LINK Value:0xc03219bc70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475559934s EvaluationString:[ var='B' labels={asset=LINK} value=0.051451670592 ], [ var='C' labels={asset=LINK} value=0 ]} {Instance:asset=LOOM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LOOM Value:0xc03219bcb0} C:{Var:C Labels:asset=LOOM Value:0xc03219bcc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475566675s EvaluationString:[ var='B' labels={asset=LOOM} value=193.213487602 ], [ var='C' labels={asset=LOOM} value=0 ]} {Instance:asset=LPT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LPT Value:0xc03219bce8} C:{Var:C Labels:asset=LPT Value:0xc03219bcf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475573135s EvaluationString:[ var='B' labels={asset=LPT} value=0 ], [ var='C' labels={asset=LPT} value=0 ]} {Instance:asset=LTC State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LTC Value:0xc03219be78} C:{Var:C Labels:asset=LTC Value:0xc03219be68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475577765s EvaluationString:[ var='B' labels={asset=LTC} value=0 ], [ var='C' labels={asset=LTC} value=0 ]} {Instance:asset=LUNC State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=LUNC Value:0xc03219bea0} C:{Var:C Labels:asset=LUNC Value:0xc03219beb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475583455s EvaluationString:[ var='B' labels={asset=LUNC} value=188.0654089912 ], [ var='C' labels={asset=LUNC} value=0 ]} {Instance:asset=MAGIC State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MAGIC Value:0xc03219bee0} C:{Var:C Labels:asset=MAGIC Value:0xc03219bef8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475588765s EvaluationString:[ var='B' labels={asset=MAGIC} value=157.4762148414 ], [ var='C' labels={asset=MAGIC} value=0 ]} {Instance:asset=MANA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MANA Value:0xc03219bf20} C:{Var:C Labels:asset=MANA Value:0xc03219bf38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475594325s EvaluationString:[ var='B' labels={asset=MANA} value=0.0027321056 ], [ var='C' labels={asset=MANA} value=0 ]} {Instance:asset=MASK State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MASK Value:0xc03219bf60} C:{Var:C Labels:asset=MASK Value:0xc03219bf78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475599315s EvaluationString:[ var='B' labels={asset=MASK} value=0.000234041749 ], [ var='C' labels={asset=MASK} value=0 ]} {Instance:asset=MATIC State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MATIC Value:0xc03219bfa0} C:{Var:C Labels:asset=MATIC Value:0xc03219bfb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475604185s EvaluationString:[ var='B' labels={asset=MATIC} value=0.0520370835 ], [ var='C' labels={asset=MATIC} value=0 ]} 
{Instance:asset=MAV State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MAV Value:0xc03219bfd8} C:{Var:C Labels:asset=MAV Value:0xc03219bfe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475609265s EvaluationString:[ var='B' labels={asset=MAV} value=0.21393348 ], [ var='C' labels={asset=MAV} value=0 ]} {Instance:asset=MEME State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MEME Value:0xc0af652010} C:{Var:C Labels:asset=MEME Value:0xc0af652028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475614405s EvaluationString:[ var='B' labels={asset=MEME} value=2.89718e-07 ], [ var='C' labels={asset=MEME} value=0 ]} {Instance:asset=MINA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MINA Value:0xc0af652050} C:{Var:C Labels:asset=MINA Value:0xc0af652068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475623345s EvaluationString:[ var='B' labels={asset=MINA} value=183.7528 ], [ var='C' labels={asset=MINA} value=0 ]} {Instance:asset=MKR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=MKR Value:0xc0af652088} C:{Var:C Labels:asset=MKR Value:0xc0af652098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475630955s EvaluationString:[ var='B' labels={asset=MKR} value=0 ], [ var='C' labels={asset=MKR} value=0 ]} {Instance:asset=NEAR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=NEAR Value:0xc0af6520c0} C:{Var:C Labels:asset=NEAR Value:0xc0af6520d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475639525s EvaluationString:[ var='B' labels={asset=NEAR} value=0 ], [ var='C' labels={asset=NEAR} value=0 ]} {Instance:asset=OCEAN State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=OCEAN Value:0xc0af652100} C:{Var:C Labels:asset=OCEAN Value:0xc0af652118}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475647295s EvaluationString:[ var='B' labels={asset=OCEAN} value=0 ], [ var='C' labels={asset=OCEAN} value=0 ]} {Instance:asset=OGN State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=OGN Value:0xc0af652138} C:{Var:C Labels:asset=OGN Value:0xc0af652148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475654505s EvaluationString:[ var='B' labels={asset=OGN} value=523.01625 ], [ var='C' labels={asset=OGN} value=0 ]} {Instance:asset=OM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=OM Value:0xc0af652168} C:{Var:C Labels:asset=OM Value:0xc0af652178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475662695s EvaluationString:[ var='B' labels={asset=OM} value=40.036966862 ], [ var='C' labels={asset=OM} value=0 ]} {Instance:asset=ONT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ONT Value:0xc0af652198} C:{Var:C Labels:asset=ONT Value:0xc0af6521a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475668886s EvaluationString:[ var='B' labels={asset=ONT} value=254.61 ], [ var='C' labels={asset=ONT} value=0 ]} {Instance:asset=OP State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=OP Value:0xc0af6521d8} C:{Var:C Labels:asset=OP Value:0xc0af6521c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475674126s EvaluationString:[ var='B' labels={asset=OP} value=0 ], [ var='C' labels={asset=OP} value=0 ]} {Instance:asset=ORDI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ORDI Value:0xc0af652200} C:{Var:C Labels:asset=ORDI Value:0xc0af652218}] EvaluatedAt:2024-05-29 13:44:10 +0000 
UTC EvaluationDuration:5.475678386s EvaluationString:[ var='B' labels={asset=ORDI} value=0.001985834707 ], [ var='C' labels={asset=ORDI} value=0 ]} {Instance:asset=PENDLE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=PENDLE Value:0xc0af652248} C:{Var:C Labels:asset=PENDLE Value:0xc0af652260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475683526s EvaluationString:[ var='B' labels={asset=PENDLE} value=0.00019674543 ], [ var='C' labels={asset=PENDLE} value=0 ]} {Instance:asset=PEOPLE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=PEOPLE Value:0xc0af6522a8} C:{Var:C Labels:asset=PEOPLE Value:0xc0af652290}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475688566s EvaluationString:[ var='B' labels={asset=PEOPLE} value=0 ], [ var='C' labels={asset=PEOPLE} value=0 ]} {Instance:asset=PERP State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=PERP Value:0xc0af6522d0} C:{Var:C Labels:asset=PERP Value:0xc0af6522e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475693116s EvaluationString:[ var='B' labels={asset=PERP} value=8.8791729e-05 ], [ var='C' labels={asset=PERP} value=0 ]} {Instance:asset=PHB State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=PHB Value:0xc0af652308} C:{Var:C Labels:asset=PHB Value:0xc0af652318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475698066s EvaluationString:[ var='B' labels={asset=PHB} value=0.000112423098 ], [ var='C' labels={asset=PHB} value=0 ]} {Instance:asset=POLYX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=POLYX Value:0xc0af652358} C:{Var:C Labels:asset=POLYX Value:0xc0af652340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475703476s EvaluationString:[ var='B' labels={asset=POLYX} value=8.990007477759 ], [ var='C' labels={asset=POLYX} value=0 ]} {Instance:asset=POWR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=POWR Value:0xc0af652398} C:{Var:C Labels:asset=POWR Value:0xc0af652380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475710566s EvaluationString:[ var='B' labels={asset=POWR} value=0.22657943 ], [ var='C' labels={asset=POWR} value=0 ]} {Instance:asset=QTUM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=QTUM Value:0xc0af6523c0} C:{Var:C Labels:asset=QTUM Value:0xc0af6523d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475716966s EvaluationString:[ var='B' labels={asset=QTUM} value=0.219513372 ], [ var='C' labels={asset=QTUM} value=0 ]} {Instance:asset=RDNT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=RDNT Value:0xc0af652400} C:{Var:C Labels:asset=RDNT Value:0xc0af652418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475723796s EvaluationString:[ var='B' labels={asset=RDNT} value=240.657253221 ], [ var='C' labels={asset=RDNT} value=0 ]} {Instance:asset=REEF State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=REEF Value:0xc0af652440} C:{Var:C Labels:asset=REEF Value:0xc0af652458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475730766s EvaluationString:[ var='B' labels={asset=REEF} value=267.793006 ], [ var='C' labels={asset=REEF} value=0 ]} {Instance:asset=RNDR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=RNDR Value:0xc0af652480} C:{Var:C Labels:asset=RNDR Value:0xc0af652498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475737736s EvaluationString:[ var='B' labels={asset=RNDR} 
value=0.000275802856 ], [ var='C' labels={asset=RNDR} value=0 ]} {Instance:asset=ROSE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ROSE Value:0xc0af6524d8} C:{Var:C Labels:asset=ROSE Value:0xc0af6524c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475745176s EvaluationString:[ var='B' labels={asset=ROSE} value=0.0007153776 ], [ var='C' labels={asset=ROSE} value=0 ]} {Instance:asset=RSR State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=RSR Value:0xc0af6524f8} C:{Var:C Labels:asset=RSR Value:0xc0af652508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475753766s EvaluationString:[ var='B' labels={asset=RSR} value=269.3477331 ], [ var='C' labels={asset=RSR} value=0 ]} {Instance:asset=RUNE State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=RUNE Value:0xc0af652530} C:{Var:C Labels:asset=RUNE Value:0xc0af652548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475761166s EvaluationString:[ var='B' labels={asset=RUNE} value=0.18898778 ], [ var='C' labels={asset=RUNE} value=0 ]} {Instance:asset=RVN State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=RVN Value:0xc0af652578} C:{Var:C Labels:asset=RVN Value:0xc0af652568}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475768776s EvaluationString:[ var='B' labels={asset=RVN} value=253.662816 ], [ var='C' labels={asset=RVN} value=0 ]} {Instance:asset=SAND State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SAND Value:0xc0af6525a0} C:{Var:C Labels:asset=SAND Value:0xc0af6525b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475774167s EvaluationString:[ var='B' labels={asset=SAND} value=1.695819e-05 ], [ var='C' labels={asset=SAND} value=0 ]} {Instance:asset=SEI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SEI Value:0xc0af6525d8} C:{Var:C Labels:asset=SEI Value:0xc0af6525e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475779237s EvaluationString:[ var='B' labels={asset=SEI} value=0 ], [ var='C' labels={asset=SEI} value=0 ]} {Instance:asset=SHIB State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SHIB Value:0xc0af652610} C:{Var:C Labels:asset=SHIB Value:0xc0af652628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475783857s EvaluationString:[ var='B' labels={asset=SHIB} value=0 ], [ var='C' labels={asset=SHIB} value=0 ]} {Instance:asset=SKL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SKL Value:0xc0af652648} C:{Var:C Labels:asset=SKL Value:0xc0af652658}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475788478s EvaluationString:[ var='B' labels={asset=SKL} value=0 ], [ var='C' labels={asset=SKL} value=0 ]} {Instance:asset=SNX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SNX Value:0xc0af652678} C:{Var:C Labels:asset=SNX Value:0xc0af652688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475792978s EvaluationString:[ var='B' labels={asset=SNX} value=0.012330768 ], [ var='C' labels={asset=SNX} value=0 ]} {Instance:asset=SOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SOL Value:0xc0af6526a8} C:{Var:C Labels:asset=SOL Value:0xc0af6526b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475797868s EvaluationString:[ var='B' labels={asset=SOL} value=0.0021964115 ], [ var='C' labels={asset=SOL} value=0 ]} {Instance:asset=SSV State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SSV 
Value:0xc0af6526d8} C:{Var:C Labels:asset=SSV Value:0xc0af6526e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475803238s EvaluationString:[ var='B' labels={asset=SSV} value=0.0109760169 ], [ var='C' labels={asset=SSV} value=0 ]} {Instance:asset=STX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=STX Value:0xc0af652708} C:{Var:C Labels:asset=STX Value:0xc0af652718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475808158s EvaluationString:[ var='B' labels={asset=STX} value=0.108029642 ], [ var='C' labels={asset=STX} value=0 ]} {Instance:asset=SUI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SUI Value:0xc0af652738} C:{Var:C Labels:asset=SUI Value:0xc0af652748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475813628s EvaluationString:[ var='B' labels={asset=SUI} value=3.8093006e-05 ], [ var='C' labels={asset=SUI} value=0 ]} {Instance:asset=SUSHI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SUSHI Value:0xc0af652788} C:{Var:C Labels:asset=SUSHI Value:0xc0af652770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475818448s EvaluationString:[ var='B' labels={asset=SUSHI} value=0 ], [ var='C' labels={asset=SUSHI} value=0 ]} {Instance:asset=SXP State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=SXP Value:0xc0af6527a8} C:{Var:C Labels:asset=SXP Value:0xc0af6527b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475823968s EvaluationString:[ var='B' labels={asset=SXP} value=0 ], [ var='C' labels={asset=SXP} value=0 ]} {Instance:asset=THETA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=THETA Value:0xc0af6527e0} C:{Var:C Labels:asset=THETA Value:0xc0af6527f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475828388s EvaluationString:[ var='B' labels={asset=THETA} value=0 ], [ var='C' labels={asset=THETA} value=0 ]} {Instance:asset=TIA State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=TIA Value:0xc0af652818} C:{Var:C Labels:asset=TIA Value:0xc0af652828}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475833138s EvaluationString:[ var='B' labels={asset=TIA} value=0.009176895 ], [ var='C' labels={asset=TIA} value=0 ]} {Instance:asset=TRB State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=TRB Value:0xc0af652858} C:{Var:C Labels:asset=TRB Value:0xc0af652848}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475837858s EvaluationString:[ var='B' labels={asset=TRB} value=0.003662214817 ], [ var='C' labels={asset=TRB} value=0 ]} {Instance:asset=TRU State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=TRU Value:0xc0af652888} C:{Var:C Labels:asset=TRU Value:0xc0af652878}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475842838s EvaluationString:[ var='B' labels={asset=TRU} value=0 ], [ var='C' labels={asset=TRU} value=0 ]} {Instance:asset=TRX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=TRX Value:0xc0af6528a8} C:{Var:C Labels:asset=TRX Value:0xc0af6528b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475847388s EvaluationString:[ var='B' labels={asset=TRX} value=0.0111105 ], [ var='C' labels={asset=TRX} value=0 ]} {Instance:asset=UNFI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=UNFI Value:0xc0af6528e0} C:{Var:C Labels:asset=UNFI Value:0xc0af6528f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475905389s EvaluationString:[ var='B' 
labels={asset=UNFI} value=0.21235117 ], [ var='C' labels={asset=UNFI} value=0 ]} {Instance:asset=UNI State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=UNI Value:0xc0af652918} C:{Var:C Labels:asset=UNI Value:0xc0af652928}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475941639s EvaluationString:[ var='B' labels={asset=UNI} value=0.0002064716 ], [ var='C' labels={asset=UNI} value=0 ]} {Instance:asset=USDC State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=USDC Value:0xc0af652950} C:{Var:C Labels:asset=USDC Value:0xc0af652968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475959019s EvaluationString:[ var='B' labels={asset=USDC} value=0.013622310936 ], [ var='C' labels={asset=USDC} value=0 ]} {Instance:asset=USDT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=USDT Value:0xc0af652990} C:{Var:C Labels:asset=USDT Value:0xc0af6529a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475974109s EvaluationString:[ var='B' labels={asset=USDT} value=2.890016e-06 ], [ var='C' labels={asset=USDT} value=0 ]} {Instance:asset=VET State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=VET Value:0xc0af6529c8} C:{Var:C Labels:asset=VET Value:0xc0af6529d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47598448s EvaluationString:[ var='B' labels={asset=VET} value=0 ], [ var='C' labels={asset=VET} value=0 ]} {Instance:asset=WAVES State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=WAVES Value:0xc0af652a00} C:{Var:C Labels:asset=WAVES Value:0xc0af652a18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47599058s EvaluationString:[ var='B' labels={asset=WAVES} value=0 ], [ var='C' labels={asset=WAVES} value=0 ]} {Instance:asset=WLD State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=WLD Value:0xc0af652a38} C:{Var:C Labels:asset=WLD Value:0xc0af652a48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47599724s EvaluationString:[ var='B' labels={asset=WLD} value=0.00017333329 ], [ var='C' labels={asset=WLD} value=0 ]} {Instance:asset=WOO State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=WOO Value:0xc0af652a68} C:{Var:C Labels:asset=WOO Value:0xc0af652a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47600432s EvaluationString:[ var='B' labels={asset=WOO} value=19.546293529 ], [ var='C' labels={asset=WOO} value=0 ]} {Instance:asset=XEM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=XEM Value:0xc0af652a98} C:{Var:C Labels:asset=XEM Value:0xc0af652aa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47601157s EvaluationString:[ var='B' labels={asset=XEM} value=360.1750843117 ], [ var='C' labels={asset=XEM} value=0 ]} {Instance:asset=XLM State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=XLM Value:0xc0af652ac8} C:{Var:C Labels:asset=XLM Value:0xc0af652ad8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.4760178s EvaluationString:[ var='B' labels={asset=XLM} value=272.77635 ], [ var='C' labels={asset=XLM} value=0 ]} {Instance:asset=XRP State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=XRP Value:0xc0af652af8} C:{Var:C Labels:asset=XRP Value:0xc0af652b08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47602279s EvaluationString:[ var='B' labels={asset=XRP} value=0.000986470682 ], [ var='C' labels={asset=XRP} value=0 ]} {Instance:asset=XVG State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:asset=XVG Value:0xc0af652b28} C:{Var:C Labels:asset=XVG Value:0xc0af652b38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47602779s EvaluationString:[ var='B' labels={asset=XVG} value=128.23349112 ], [ var='C' labels={asset=XVG} value=0 ]} {Instance:asset=YGG State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=YGG Value:0xc0af652b68} C:{Var:C Labels:asset=YGG Value:0xc0af652b58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.476033941s EvaluationString:[ var='B' labels={asset=YGG} value=0 ], [ var='C' labels={asset=YGG} value=0 ]} {Instance:asset=ZRX State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:asset=ZRX Value:0xc0af652b88} C:{Var:C Labels:asset=ZRX Value:0xc0af652b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.476038401s EvaluationString:[ var='B' labels={asset=ZRX} value=24.372 ], [ var='C' labels={asset=ZRX} value=0 ]}]" duration=118.439285ms + level=debug ts=2024-05-29T13:44:15.47898932Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.47905301Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.47892965Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.47888095Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.47886745Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478842149Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478811349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478802249Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.47868232Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478766048Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478754348Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.478670218Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543604 slug=kingmakers 
instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478696147Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478684147Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478675647Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:15.478613546Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.478497607Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.478350442Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.478270011Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=6765bbad94cffd3c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.478199961Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000002, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.47795374s EvaluationString:}]" duration=28.053062ms + level=debug ts=2024-05-29T13:44:15.478097665Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.47807337Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.477921501Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.477910832Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.274723ms + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.477878421Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.938978ms + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.477737459Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.47762402Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.477443411Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.477006335Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.47668921Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.476614703Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.47662092Z 
caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.476565254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:15.476572772Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=6e00a5663fa4ec93 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.476396467Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475845833s EvaluationString:}]" duration=110.069958ms + level=debug ts=2024-05-29T13:44:15.476467811Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.476452043Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.476083014Z caller=remote_instance_store.go:51 user=355252 slug=bumper msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.476393253Z caller=remote_instance_store.go:51 user=150145 slug=pleasant msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.476314116Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.47607891Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=781127 slug=randamu instance="__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/" t=2024-05-29T13:44:15.476348904Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=150145 slug=pleasant instance= t=2024-05-29T13:44:15.476341241Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=150145 slug=pleasant instance= t=2024-05-29T13:44:15.476328421Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:15.476285353Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=150145 slug=pleasant version=7 fingerprint=cf8f668ebc691792 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.47621395Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.475837097s EvaluationString:}]" duration=15.190557ms + level=debug ts=2024-05-29T13:44:15.47617443Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.476031847Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:15.475966084Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.090607ms + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=_h27TffVk, ref_id=A" t=2024-05-29T13:44:15.47599854Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=538037 
slug=drivewealth t=2024-05-29T13:44:15.475983261Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=781127 slug=randamu instance="__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/" t=2024-05-29T13:44:15.476003487Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=781127 slug=randamu instance="__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/" t=2024-05-29T13:44:15.475782243Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.475674162Z caller=remote_instance_store.go:51 user=265692 slug=beekeeper msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=342039 slug=criblcloud t=2024-05-29T13:44:15.475567001Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=781127 slug=randamu instance="__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/" t=2024-05-29T13:44:15.475532788Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.475437088Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=hNyufM-7z, ref_id=A" t=2024-05-29T13:44:15.475340297Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.474870073Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=340750 slug=aptoslabs t=2024-05-29T13:44:15.474874742Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.439688ms + level=debug ts=2024-05-29T13:44:15.474681438Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.474321245Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=781127 slug=randamu t=2024-05-29T13:44:15.474258523Z level=debug msg="State manager processing evaluation results" resultCount=42 + level=debug ts=2024-05-29T13:44:15.474186142Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.473597716Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=781127 slug=randamu version=8 fingerprint=f950195b7cf37d7e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.457298702Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet1.drand, mode=rw, path=/ Value:0xc03d3c0798} B:{Var:B Labels:__name__=disk_free, 
__proxy_source__=influx, build=bastion-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet1.drand, mode=rw, path=/ Value:0xc03d3c0850} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet1.drand, mode=rw, path=/ Value:0xc03d3c0980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453728803s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet1.drand, mode=rw, path=/} value=6.030512128e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet1.drand, mode=rw, path=/} value=6.030512128e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet2.drand, mode=rw, path=/ Value:0xc03d3c0c20} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet2.drand, mode=rw, path=/ Value:0xc03d3c0ad0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet2.drand, mode=rw, path=/ Value:0xc03d3c0b80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453745334s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet2.drand, mode=rw, path=/} value=6.05446144e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet2.drand, mode=rw, path=/} value=6.05446144e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet2.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/ Value:0xc03d3c0f58} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/ Value:0xc03d3c0d98} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, 
fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/ Value:0xc03d3c0e38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453754444s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/} value=6.04411904e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/} value=6.04411904e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=bastion.mainnet3.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/ Value:0xc03d3c1078} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/ Value:0xc03d3c1120} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/ Value:0xc03d3c11c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453763944s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/} value=6.073413632e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/} value=6.073413632e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/ Value:0xc03d3c14d0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/ Value:0xc03d3c1378} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/ Value:0xc03d3c1410}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453775344s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, 
fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/} value=6.073147392e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/} value=6.073147392e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet2.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet3.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet3.drand, mode=rw, path=/ Value:0xc03d3c1600} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet3.drand, mode=rw, path=/ Value:0xc03d3c1698} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=bastion-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet3.drand, mode=rw, path=/ Value:0xc03d3c1738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453785455s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet3.drand, mode=rw, path=/} value=6.08063488e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet3.drand, mode=rw, path=/} value=6.08063488e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=bastion-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=bastion.testnet3.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/ Value:0xc03d3c1a10} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/ Value:0xc03d3c1af8} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/ Value:0xc03d3c1b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453804465s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/} value=3.983552512e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/} value=3.983552512e+09 ], [ var='C' labels={__name__=disk_free, 
__proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet1.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc03d3c1cf0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc03d3c1df8} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc03d3c1f08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453811505s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet1.drand, mode=rw, path=/mnt/drand} value=4.94351781888e+11 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet1.drand, mode=rw, path=/mnt/drand} value=4.94351781888e+11 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet1.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet2.drand, mode=rw, path=/ Value:0xc0216c0070} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet2.drand, mode=rw, path=/ Value:0xc0216c0120} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet2.drand, mode=rw, path=/ Value:0xc0216c01c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453818165s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet2.drand, mode=rw, path=/} value=3.6370432e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet2.drand, mode=rw, path=/} value=3.6370432e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet2.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, 
host=drand.mainnet2.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc0216c0310} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc0216c03a8} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc0216c0470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453823856s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet2.drand, mode=rw, path=/mnt/drand} value=4.89135304704e+11 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet2.drand, mode=rw, path=/mnt/drand} value=4.89135304704e+11 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet2.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet3.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet3.drand, mode=rw, path=/ Value:0xc0216c0660} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet3.drand, mode=rw, path=/ Value:0xc0216c0700} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet3.drand, mode=rw, path=/ Value:0xc0216c07a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453830286s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet3.drand, mode=rw, path=/} value=3.930591232e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet3.drand, mode=rw, path=/} value=3.930591232e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=drand.mainnet3.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet3.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet3.drand, mode=rw, path=/mnt/drand 
Value:0xc0216c0b20} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc0216c0910} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc0216c09d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453837406s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet3.drand, mode=rw, path=/mnt/drand} value=4.89267183616e+11 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet3.drand, mode=rw, path=/mnt/drand} value=4.89267183616e+11 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=drand.mainnet3.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet1.drand, label=/, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet1.drand, label=/, mode=rw, path=/ Value:0xc0216c0dc8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet1.drand, label=/, mode=rw, path=/ Value:0xc0216c0c70} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet1.drand, label=/, mode=rw, path=/ Value:0xc0216c0d18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453843846s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet1.drand, label=/, mode=rw, path=/} value=3.659354112e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet1.drand, label=/, mode=rw, path=/} value=3.659354112e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet1.drand, label=/, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet1.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc0216c11a8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc0216c1010} C:{Var:C 
Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc0216c10e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453856306s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet1.drand, mode=rw, path=/mnt/drand} value=3.0254690304e+10 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet1.drand, mode=rw, path=/mnt/drand} value=3.0254690304e+10 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet1.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet2.drand, label=/, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet2.drand, label=/, mode=rw, path=/ Value:0xc0216c13a0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet2.drand, label=/, mode=rw, path=/ Value:0xc0216c1448} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet2.drand, label=/, mode=rw, path=/ Value:0xc0216c1300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453875637s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet2.drand, label=/, mode=rw, path=/} value=3.150884864e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet2.drand, label=/, mode=rw, path=/} value=3.150884864e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet2.drand, label=/, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet2.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet2.drand, mode=rw, path=/mnt/drand Value:0xc0216c1650} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet2.drand, mode=rw, path=/mnt/drand Value:0xc0216c17a0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet2.drand, mode=rw, path=/mnt/drand Value:0xc0216c1850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.453883967s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet2.drand, mode=rw, path=/mnt/drand} value=3.849299968e+10 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet2.drand, mode=rw, path=/mnt/drand} value=3.849299968e+10 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet2, deployment=testnet2, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet2.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet3.drand, label=/, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet3.drand, label=/, mode=rw, path=/ Value:0xc0216c1bc0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet3.drand, label=/, mode=rw, path=/ Value:0xc0216c1c70} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet3.drand, label=/, mode=rw, path=/ Value:0xc0216c1a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453891367s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet3.drand, label=/, mode=rw, path=/} value=3.750531072e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet3.drand, label=/, mode=rw, path=/} value=3.750531072e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme0n1p1, env=testnet, fstype=xfs, host=drand.testnet3.drand, label=/, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet3.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet3.drand, mode=rw, path=/mnt/drand Value:0xc0216c1f30} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet3.drand, mode=rw, path=/mnt/drand Value:0xc0216c1fe0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet3.drand, mode=rw, path=/mnt/drand Value:0xc0216c1e70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453897757s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet3.drand, mode=rw, path=/mnt/drand} 
value=3.7604958208e+10 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet3.drand, mode=rw, path=/mnt/drand} value=3.7604958208e+10 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=drand-testnet3, deployment=testnet3, device=nvme1n1, env=testnet, fstype=ext4, host=drand.testnet3.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet1.drand, mode=rw, path=/ Value:0xc00cf70078} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet1.drand, mode=rw, path=/ Value:0xc00cf70108} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet1.drand, mode=rw, path=/ Value:0xc00cf70190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453903497s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet1.drand, mode=rw, path=/} value=4.366372864e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet1.drand, mode=rw, path=/} value=4.366372864e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet1.drand, mode=rw, path=/ Value:0xc00cf703c8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet1.drand, mode=rw, path=/ Value:0xc00cf702a0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet1.drand, mode=rw, path=/ Value:0xc00cf70330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453910788s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet1.drand, mode=rw, path=/} value=4.3618304e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, 
fstype=xfs, host=relay-gossip1.mainnet1.drand, mode=rw, path=/} value=4.3618304e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet1.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc00cf70500} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc00cf705a0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc00cf70638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453917608s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet1.drand, mode=rw, path=/mnt/drand} value=4.843900928e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet1.drand, mode=rw, path=/mnt/drand} value=4.843900928e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet1.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet1.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc00cf708d8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc00cf707a0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet1.drand, mode=rw, path=/mnt/drand Value:0xc00cf70840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453924238s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet1.drand, mode=rw, path=/mnt/drand} value=4.843900928e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet1.drand, mode=rw, 
path=/mnt/drand} value=4.843900928e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet1, deployment=mainnet1, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet1.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet2.drand, mode=rw, path=/ Value:0xc00cf70b28} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet2.drand, mode=rw, path=/ Value:0xc00cf70a00} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet2.drand, mode=rw, path=/ Value:0xc00cf70a90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453935438s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet2.drand, mode=rw, path=/} value=4.358651904e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet2.drand, mode=rw, path=/} value=4.358651904e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet2.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet2.drand, mode=rw, path=/ Value:0xc00cf70d98} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet2.drand, mode=rw, path=/ Value:0xc00cf70c60} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet2.drand, mode=rw, path=/ Value:0xc00cf70cf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453945808s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet2.drand, mode=rw, path=/} value=4.35150848e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet2.drand, mode=rw, path=/} value=4.35150848e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, 
device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet2.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet2.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc00cf70ef8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc00cf70f98} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc00cf71028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453955099s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet2.drand, mode=rw, path=/mnt/drand} value=4.843945984e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet2.drand, mode=rw, path=/mnt/drand} value=4.843945984e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet2.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet2.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc00cf712a0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc00cf71340} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet2.drand, mode=rw, path=/mnt/drand Value:0xc00cf71200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453962139s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet2.drand, mode=rw, path=/mnt/drand} value=4.843905024e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet2.drand, mode=rw, path=/mnt/drand} value=4.843905024e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet2, deployment=mainnet2, device=nvme1n1, env=mainnet, fstype=ext4, 
host=relay-gossip1.mainnet2.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet3.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet3.drand, mode=rw, path=/ Value:0xc00cf71458} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet3.drand, mode=rw, path=/ Value:0xc00cf71530} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet3.drand, mode=rw, path=/ Value:0xc00cf71610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453968989s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet3.drand, mode=rw, path=/} value=4.333887488e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet3.drand, mode=rw, path=/} value=4.333887488e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip0.mainnet3.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet3.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet3.drand, mode=rw, path=/ Value:0xc00cf71860} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet3.drand, mode=rw, path=/ Value:0xc00cf71728} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet3.drand, mode=rw, path=/ Value:0xc00cf717d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453974839s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet3.drand, mode=rw, path=/} value=4.334399488e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet3.drand, mode=rw, path=/} value=4.334399488e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-gossip1.mainnet3.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, 
deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet3.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc00cf71990} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc00cf71a28} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc00cf71ac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453983349s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet3.drand, mode=rw, path=/mnt/drand} value=4.843905024e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet3.drand, mode=rw, path=/mnt/drand} value=4.843905024e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip0.mainnet3.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet3.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc00cf71bf8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc00cf71c90} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet3.drand, mode=rw, path=/mnt/drand Value:0xc00cf71d28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.453990339s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet3.drand, mode=rw, path=/mnt/drand} value=4.843905024e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet3.drand, mode=rw, path=/mnt/drand} value=4.843905024e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-mainnet3, deployment=mainnet3, device=nvme1n1, env=mainnet, fstype=ext4, host=relay-gossip1.mainnet3.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, 
env=testnet, fstype=xfs, host=relay-gossip0.testnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip0.testnet1.drand, mode=rw, path=/ Value:0xc00cf71e70} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip0.testnet1.drand, mode=rw, path=/ Value:0xc00cf71f20} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip0.testnet1.drand, mode=rw, path=/ Value:0xc00cf71fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.45399755s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip0.testnet1.drand, mode=rw, path=/} value=4.29553664e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip0.testnet1.drand, mode=rw, path=/} value=4.29553664e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip0.testnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip1.testnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip1.testnet1.drand, mode=rw, path=/ Value:0xc00fe561f8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip1.testnet1.drand, mode=rw, path=/ Value:0xc00fe560b0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip1.testnet1.drand, mode=rw, path=/ Value:0xc00fe56148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.45400353s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip1.testnet1.drand, mode=rw, path=/} value=4.294918144e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip1.testnet1.drand, mode=rw, path=/} value=4.294918144e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-gossip1.testnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip0.testnet1.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, 
__proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip0.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc00fe564b0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip0.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc00fe56570} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip0.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc00fe56378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.45400922s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip0.testnet1.drand, mode=rw, path=/mnt/drand} value=4.843917312e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip0.testnet1.drand, mode=rw, path=/mnt/drand} value=4.843917312e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip0.testnet1.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip1.testnet1.drand, mode=rw, path=/mnt/drand State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip1.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc00fe566e0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip1.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc00fe56788} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip1.testnet1.drand, mode=rw, path=/mnt/drand Value:0xc00fe56840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.45401985s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip1.testnet1.drand, mode=rw, path=/mnt/drand} value=4.843917312e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip1.testnet1.drand, mode=rw, path=/mnt/drand} value=4.843917312e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-gossip-testnet1, deployment=testnet1, device=nvme1n1, env=testnet, fstype=ext4, host=relay-gossip1.testnet1.drand, mode=rw, path=/mnt/drand} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-068b3361fe76885d8.mainnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-068b3361fe76885d8.mainnet1.drand, mode=rw, path=/ Value:0xc00fe56998} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-068b3361fe76885d8.mainnet1.drand, mode=rw, path=/ Value:0xc00fe56a30} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-068b3361fe76885d8.mainnet1.drand, mode=rw, path=/ Value:0xc00fe56ae0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.45403042s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-068b3361fe76885d8.mainnet1.drand, mode=rw, path=/} value=8.10706944e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-068b3361fe76885d8.mainnet1.drand, mode=rw, path=/} value=8.10706944e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-068b3361fe76885d8.mainnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0cea628215ff85158.mainnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0cea628215ff85158.mainnet1.drand, mode=rw, path=/ Value:0xc00fe56c30} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0cea628215ff85158.mainnet1.drand, mode=rw, path=/ Value:0xc00fe56d08} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0cea628215ff85158.mainnet1.drand, mode=rw, path=/ Value:0xc00fe56da8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.454041181s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0cea628215ff85158.mainnet1.drand, mode=rw, path=/} value=8.024109056e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0cea628215ff85158.mainnet1.drand, mode=rw, path=/} value=8.024109056e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet1, deployment=mainnet1, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0cea628215ff85158.mainnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-080bdd253f7596f54.mainnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-080bdd253f7596f54.mainnet2.drand, mode=rw, path=/ Value:0xc00fe56ee8} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-080bdd253f7596f54.mainnet2.drand, mode=rw, path=/ Value:0xc00fe56fa0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-080bdd253f7596f54.mainnet2.drand, mode=rw, path=/ Value:0xc00fe57048}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.454048521s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-080bdd253f7596f54.mainnet2.drand, mode=rw, path=/} value=8.386035712e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-080bdd253f7596f54.mainnet2.drand, mode=rw, path=/} value=8.386035712e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-080bdd253f7596f54.mainnet2.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0b5536033206d096b.mainnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0b5536033206d096b.mainnet2.drand, mode=rw, path=/ Value:0xc00fe571b0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0b5536033206d096b.mainnet2.drand, mode=rw, path=/ Value:0xc00fe57278} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0b5536033206d096b.mainnet2.drand, mode=rw, path=/ Value:0xc00fe57360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.454054741s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0b5536033206d096b.mainnet2.drand, mode=rw, path=/} value=8.403574784e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0b5536033206d096b.mainnet2.drand, mode=rw, path=/} value=8.403574784e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet2, deployment=mainnet2, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0b5536033206d096b.mainnet2.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-032c31127d7e330ae.mainnet3.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-032c31127d7e330ae.mainnet3.drand, mode=rw, path=/ Value:0xc00fe57610} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-032c31127d7e330ae.mainnet3.drand, mode=rw, path=/ Value:0xc00fe574b0} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-032c31127d7e330ae.mainnet3.drand, mode=rw, path=/ Value:0xc00fe57570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.454061931s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-032c31127d7e330ae.mainnet3.drand, mode=rw, path=/} value=7.000711168e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-032c31127d7e330ae.mainnet3.drand, mode=rw, path=/} value=7.000711168e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-032c31127d7e330ae.mainnet3.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0601b1015f8910a30.mainnet3.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0601b1015f8910a30.mainnet3.drand, mode=rw, path=/ Value:0xc00fe57760} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0601b1015f8910a30.mainnet3.drand, mode=rw, path=/ Value:0xc00fe57800} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0601b1015f8910a30.mainnet3.drand, mode=rw, path=/ Value:0xc00fe578b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.454079561s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0601b1015f8910a30.mainnet3.drand, mode=rw, path=/} value=6.727970816e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0601b1015f8910a30.mainnet3.drand, mode=rw, path=/} value=6.727970816e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-mainnet3, deployment=mainnet3, device=nvme0n1p1, env=mainnet, fstype=xfs, host=relay-http-i-0601b1015f8910a30.mainnet3.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-02749989dbbc8efe1.testnet1.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-02749989dbbc8efe1.testnet1.drand, mode=rw, path=/ Value:0xc00fe57aa0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-02749989dbbc8efe1.testnet1.drand, mode=rw, path=/ Value:0xc00fe57b40} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-02749989dbbc8efe1.testnet1.drand, mode=rw, path=/ Value:0xc00fe579f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.454085842s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-02749989dbbc8efe1.testnet1.drand, mode=rw, path=/} value=4.348719104e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-02749989dbbc8efe1.testnet1.drand, mode=rw, path=/} value=4.348719104e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet1, deployment=testnet1, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-02749989dbbc8efe1.testnet1.drand, mode=rw, path=/} value=0 ]} {Instance:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-04b701d65afed748e.testnet2.drand, mode=rw, path=/ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-04b701d65afed748e.testnet2.drand, mode=rw, path=/ Value:0xc00fe57cb0} B:{Var:B Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-04b701d65afed748e.testnet2.drand, mode=rw, path=/ Value:0xc00fe57d60} C:{Var:C Labels:__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-04b701d65afed748e.testnet2.drand, mode=rw, path=/ Value:0xc00fe57e00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.454091462s EvaluationString:[ var='A' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-04b701d65afed748e.testnet2.drand, mode=rw, path=/} value=4.334800896e+09 ], [ var='B' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-04b701d65afed748e.testnet2.drand, mode=rw, path=/} value=4.334800896e+09 ], [ var='C' labels={__name__=disk_free, __proxy_source__=influx, build=relay-http-testnet2, deployment=testnet2, device=nvme0n1p1, env=testnet, fstype=xfs, host=relay-http-i-04b701d65afed748e.testnet2.drand, mode=rw, path=/} value=0 ]}]" duration=13.878701ms
+ logger=ngalert.state.manager.persist user=436633 slug=swirldslabsproduction t=2024-05-29T13:44:15.473695016Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.229311ms
+ level=debug ts=2024-05-29T13:44:15.473704082Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.473662612Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.473586705Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.473507438Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.473225763Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.472910598Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.472690789Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.472552956Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.47254136Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=tpt.sessionguardian.com" t=2024-05-29T13:44:15.472541574Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.472414514Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=b66fa2f0-77b1-4213-b385-73969ab1d1f0, ref_id=A" t=2024-05-29T13:44:15.472434418Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=sr.sessionguardian.com" t=2024-05-29T13:44:15.47245852Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=b66fa2f0-77b1-4213-b385-73969ab1d1f0, ref_id=A" t=2024-05-29T13:44:15.4724078Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=sr.sessionguardian.com" t=2024-05-29T13:44:15.472438956Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=206107 slug=hydrolix version=8 fingerprint=826efc75227fde9a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.472296714Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=b66fa2f0-77b1-4213-b385-73969ab1d1f0, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.472010154s EvaluationString:}]" duration=158.671161ms
+ logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=hirecounsel.sessionguardian.com" t=2024-05-29T13:44:15.472365674Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.472049221Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.472051934Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:15.471951931Z level=debug msg="Deleting alert states" count=1
+ logger=ngalert.state.manager.persist user=111839 slug=last9 t=2024-05-29T13:44:15.471956465Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=111839 slug=last9 t=2024-05-29T13:44:15.471868378Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}{{ $cluster }}': error parsing template __alert_Container restarts - PROD: template: __alert_Container restarts - PROD:1: undefined variable \"$cluster\""
+ level=debug ts=2024-05-29T13:44:15.471874221Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=111839 slug=last9 t=2024-05-29T13:44:15.47176228Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:15.471543699Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.098659ms
+ level=debug ts=2024-05-29T13:44:15.471338Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.471281567Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.470846128Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.47083389Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.470360319Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.470366473Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.470261494Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.470213371Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.470100612Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.470087704Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:15.469907056Z caller=remote_alert_sender.go:94 user=114492 slug=railsbank host=railsbank-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.253.115:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=TVTwavf4z alerts=1
+ level=debug ts=2024-05-29T13:44:15.469779946Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.469238531Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.468883519Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.468959598Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.468660254Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.468643437Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.46870325Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:15.468445184Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.468312311Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=111653 slug=theassociationmxp t=2024-05-29T13:44:15.468404786Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.467904952Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.467905449Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.467691796Z caller=remote_instance_store.go:51 user=265692 slug=beekeeper msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.467181113Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.467170741Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:15.466872135Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance= t=2024-05-29T13:44:15.46686001Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor t=2024-05-29T13:44:15.466813718Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.466767778Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=163215 slug=tripadvisor version=4 fingerprint=b94de85f7edc2eb3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.466732639Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc0013f90f0} C:{Var:C Labels: Value:0xc0013f90f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.466401743s EvaluationString:[ var='B' labels={} value=0 ], [ var='C' labels={} value=0 ]}]" duration=697.048443ms
+ level=debug ts=2024-05-29T13:44:15.466488025Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.46606552Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.466029057Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.465836259Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.465620572Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.46562057Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.465466743Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.465288157Z caller=remote_instance_store.go:51 user=481110 slug=g123 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:15.465083502Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.464898974Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.464897741Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.464724687Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.464397601Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.464036046Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.464158757Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=mlflow, pod=mlflow-6bfbc7cf6b-2449d" t=2024-05-29T13:44:15.464081147Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=mlflow, pod=mlflow-6bfbc7cf6b-2449d" t=2024-05-29T13:44:15.464069028Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.463989184Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.463951607Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.463950163Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.463907599Z caller=remote_instance_store.go:51 user=743579 slug=neotax msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.463851231Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.463661776Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=627215 slug=admitrievskygrafanacom instance= t=2024-05-29T13:44:15.463649572Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=627215 slug=admitrievskygrafanacom instance= t=2024-05-29T13:44:15.463639912Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=627215 slug=admitrievskygrafanacom t=2024-05-29T13:44:15.463607461Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.46347264Z caller=remote_instance_store.go:51 user=340750 slug=aptoslabs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.463228078Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.463294399Z caller=remote_instance_store.go:51 user=747518 slug=dvevoli msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=664976 slug=staging1themomproject instance="datasource_uid=grafanacloud-prom, ref_id=query" t=2024-05-29T13:44:15.463318321Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=664976 slug=staging1themomproject instance="datasource_uid=grafanacloud-prom, ref_id=query" t=2024-05-29T13:44:15.463295101Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=664976 slug=staging1themomproject version=12 fingerprint=9ba629dd8d543842 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.463213263Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=query State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.462774201s EvaluationString:}]" duration=12.465145ms
+ level=debug ts=2024-05-29T13:44:15.463146591Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.462922126Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.462603404Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.462556799Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=457710302499, dimension_FunctionName=talabat-prod-salus-sonarcloud-integrator, dimension_Resource=talabat-prod-salus-sonarcloud-integrator, name=arn:aws:lambda:eu-west-2:457710302499:function:talabat-prod-salus-sonarcloud-integrator, region=eu-west-2" t=2024-05-29T13:44:15.462227824Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.462121808Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.462001759Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.462028855Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=18335 slug=semaphore instance="datasource_uid=000000021, ref_id=A,B" t=2024-05-29T13:44:15.462028266Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=18335 slug=semaphore t=2024-05-29T13:44:15.461983658Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.461870191Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.461998429Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.461947132Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.461678633Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.801213ms
+ logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:15.461672242Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:15.461629454Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:15.461543924Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.461472758Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.461110613Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.461101747Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=251760 slug=forgerock t=2024-05-29T13:44:15.460872001Z level=warn msg="Tick dropped because alert rule evaluation is too slow" rule_uid=edew2g8chg5c0a org_id=1 time=2024-05-29T13:43:10Z
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=us-west-2c, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-03cc4167ea3e657ec, host_image_id=ami-0733a0439c0d660a2, host_name=ip-10-16-89-127.us-west-2.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-16-89-127 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.460760578Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01543056d991a72de, host_image_id=ami-0733a0439c0d660a2, host_name=ip-10-16-71-233.us-west-2.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-16-71-233 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.460630258Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01543056d991a72de, host_image_id=ami-0733a0439c0d660a2, host_name=ip-10-16-71-233.us-west-2.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-16-71-233 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.460610443Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=us-east-2c, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-2, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-072abdc930297447d, host_image_id=ami-0312e8ba19582f7ec, host_name=ip-10-11-92-214.us-east-2.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-92-214 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.460362977Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.460193844Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.460117789Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=us-east-2a, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-2, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-09bf8b9e5c8fb5a27, host_image_id=ami-0312e8ba19582f7ec, host_name=ip-10-11-52-50.us-east-2.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-52-50 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.460093219Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=eu-central-1c, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=eu-central-1, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-0c758eeba24773292, host_image_id=ami-0be455480fea8bf41, host_name=ip-10-12-91-14.eu-central-1.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-91-14 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.459968412Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.459867778Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=ap-northeast-1d, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=ap-northeast-1, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-0dff77a87c534a66d, host_image_id=ami-035344b86ca1301c7, host_name=ip-10-15-85-152.ap-northeast-1.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-15-85-152 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.459565311Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=467258 slug=neonprod t=2024-05-29T13:44:15.459569371Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=467258 slug=neonprod instance="background_job_name=clickhouse_consumption" t=2024-05-29T13:44:15.459544651Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.459497785Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=467258 slug=neonprod version=27 fingerprint=3bd8254861f002d6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.459374358Z level=debug msg="Alert rule evaluated" results="[{Instance:background_job_name=clickhouse_consumption State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:background_job_name=clickhouse_consumption Value:0xc032894fb8} B:{Var:B Labels:background_job_name=clickhouse_consumption Value:0xc032894fc0} C:{Var:C Labels:background_job_name=clickhouse_consumption Value:0xc032894fb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.45899072s EvaluationString:[ var='A' labels={background_job_name=clickhouse_consumption} value=239.82 ], [ var='B' labels={background_job_name=clickhouse_consumption} value=239.82 ], [ var='C' labels={background_job_name=clickhouse_consumption} value=0 ]}]" duration=26.889092ms
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=ap-northeast-1c, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=ap-northeast-1, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-09bdedfd034d99dfd, host_image_id=ami-035344b86ca1301c7, host_name=ip-10-15-76-22.ap-northeast-1.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-15-76-22 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.459421179Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:15.459369521Z caller=remote_alert_sender.go:94 user=642786 slug=sophoscomnsg host=sophoscomnsg-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.44.231:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f40681e4-7987-4aa3-8952-9303df49cc2b alerts=1
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cloud_account_id=077641130042, cloud_availability_zone=ap-northeast-1a, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=ap-northeast-1, ec2_tag_Name=firewalk-ams-dev-consul-server-001-asg, ec2_tag_environment=dev, ec2_tag_environment_name=dev, ec2_tag_project=ams, ec2_tag_service=consul-server, ec2_tag_unique_id=15537, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-032358908d4f1e875, host_image_id=ami-035344b86ca1301c7, host_name=ip-10-15-53-14.ap-northeast-1.compute.internal, host_type=t4g.medium, http_scheme=http, instance=127.0.0.1:9100, job=host, net_host_port=9100, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-15-53-14 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service_instance_id=127.0.0.1:9100, service_name=host" t=2024-05-29T13:44:15.45926605Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.458952336Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.724747ms
+ level=debug ts=2024-05-29T13:44:15.458830805Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.458598657Z caller=remote_instance_store.go:51 user=355252 slug=bumper msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:15.458401274Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.606565ms
+ logger=ngalert.scheduler user=163513 slug=dialpad version=406 fingerprint=e1ac83639d22d932 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.458304788Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.457993891s EvaluationString:}]" duration=29.858636ms
+ level=debug ts=2024-05-29T13:44:15.458184589Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.458249866Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.458253172Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.458165722Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.458191551Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.457498913Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.457503142Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.457389733Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.45647017Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.456419869Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.456313486Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.456294112Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.456104409Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.45610529Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.456134539Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.456040174Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455949252Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455957414Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455762863Z caller=remote_instance_store.go:51 user=743579 slug=neotax msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455602445Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455427172Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455264697Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455289256Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.455084349Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:15.454682539Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.934331ms
+ logger=ngalert.state.manager.persist user=178698 slug=avantpage t=2024-05-29T13:44:15.454699737Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.10094ms
+ level=debug ts=2024-05-29T13:44:15.454579576Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.454507541Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.453886615Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.453857346Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.453766217Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.453493747Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.453484134Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.45323772Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ ts=2024-05-29T13:44:15.453233314Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: grafana-ruler-6ddb6c5b98-rj2sh-69b8b3b5 10.144.111.191:7946"
+ logger=ngalert.state.manager user=432323 slug=lithic instance="QueueName=ledger-ne-consumer-v1-sandbox-dlq" t=2024-05-29T13:44:15.452988943Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=20177 slug=paddledash t=2024-05-29T13:44:15.452883006Z level=debug msg="State manager processing evaluation results" resultCount=1
+ Error parsing panelUID for alert annotationruleID1925dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=20177 slug=paddledash version=2 fingerprint=8bbe0e2b5316b90e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.452821244Z level=debug msg="Alert rule evaluated" results="[{Instance:LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc State:Normal Error: Results:map[] Values:map[AlertCondition:{Var:AlertCondition Labels:LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc Value:0xc015acb2b8} BadEvents:{Var:BadEvents Labels:LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc Value:0xc015acb220} BurnRate:{Var:BurnRate Labels:LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc Value:0xc015acb238} ValidEvents:{Var:ValidEvents Labels:LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc Value:0xc015acb2b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.452508562s EvaluationString:[ var='AlertCondition' labels={LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc} value=0 ], [ var='BadEvents' labels={LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc} value=0 ], [ var='BurnRate' labels={LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc} value=0 ], [ var='ValidEvents' labels={LoadBalancer=app/paddle-production-report/aea44f60a3b3afbc} value=7094 ]}]" duration=183.769469ms
+ level=debug ts=2024-05-29T13:44:15.452789277Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.452800624Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.452679087Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.452198919Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.4517892Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.451596688Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.451609329Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.451567302Z caller=remote_instance_store.go:51 user=374423 slug=bitburst msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:15.451536001Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.451552643Z caller=remote_instance_store.go:51 user=119385 slug=elastio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.451494815Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=119385 slug=elastio instance="datasource_uid=g9k1LXV4z, ref_id=A" t=2024-05-29T13:44:15.451482364Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.451398074Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=550189 slug=flippa t=2024-05-29T13:44:15.451331862Z level=debug msg="Skip rule evaluation because it is paused"
+ level=debug ts=2024-05-29T13:44:15.451333231Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.451319341Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.450976831Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.450938566Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.450917332Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.449923288Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.449835116Z caller=remote_rule_evaluator.go:193 user=855233 slug=sadeno msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ level=debug ts=2024-05-29T13:44:15.449849581Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.449611898Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.449503059Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.449386141Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:15.449125241Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.449231909Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.448812973Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.448723944Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.448665042Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.448647815Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.448636866Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.447622173Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.447489592Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.447425487Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.447422373Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.447386161Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.447331025Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.447322825Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.37580427Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.373981035Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.373786781Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-zwkkp" t=2024-05-29T13:44:15.447289596Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.44718593Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-xmws8" t=2024-05-29T13:44:15.447237095Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-xl26j" t=2024-05-29T13:44:15.447204405Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-xl26j" t=2024-05-29T13:44:15.447198375Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-wd788" t=2024-05-29T13:44:15.447084022Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-vthh8" t=2024-05-29T13:44:15.447047112Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-t2zt7" t=2024-05-29T13:44:15.446983351Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.44680352Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-px2kx" t=2024-05-29T13:44:15.446895819Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-nt62w" t=2024-05-29T13:44:15.446827658Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.446770634Z caller=remote_instance_store.go:51 user=635771 slug=sharedservices msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-nrv5f" t=2024-05-29T13:44:15.446800897Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:15.446700483Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=635771 slug=sharedservices instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.446684073Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=635771 slug=sharedservices instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.446675193Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=635771 slug=sharedservices t=2024-05-29T13:44:15.446640242Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-n2njm" t=2024-05-29T13:44:15.446648884Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=635771 slug=sharedservices version=3 fingerprint=b4f4d83bfe51984b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.446547261Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.445930699s EvaluationString:}]" duration=13.110902ms
+ level=debug ts=2024-05-29T13:44:15.446611087Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-m9gpv" t=2024-05-29T13:44:15.446587073Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.446538846Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-l4fkd" t=2024-05-29T13:44:15.446499101Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.446495663Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.446471157Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-kkr2t" t=2024-05-29T13:44:15.44643713Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:15.446453351Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.446416399Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-jz2xx" t=2024-05-29T13:44:15.446378999Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-hsrbj" t=2024-05-29T13:44:15.446308608Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-hgflw" t=2024-05-29T13:44:15.446253226Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-gkqcr" t=2024-05-29T13:44:15.446192695Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.373525065Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-g7v46" t=2024-05-29T13:44:15.446140794Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.446043284Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-dzmxk" t=2024-05-29T13:44:15.446073003Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.44598623Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=692010 slug=mercariusprod t=2024-05-29T13:44:15.445771696Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.020068ms
+ level=debug ts=2024-05-29T13:44:15.445734336Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.445592256Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:15.445553633Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Multiplo Admin URL"
+ logger=ngalert.scheduler user=191103 slug=amazonadmin version=6 fingerprint=f6256261c7202c4c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.44546973Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.445234983s EvaluationString:}]" duration=12.116734ms
+ logger=ngalert.state.manager user=749971 slug=unobravo instance= t=2024-05-29T13:44:15.445495231Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-d5z59" t=2024-05-29T13:44:15.4454494Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-ctnm2" t=2024-05-29T13:44:15.44540054Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-cjb82" t=2024-05-29T13:44:15.445361449Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-9tfcq" t=2024-05-29T13:44:15.445326659Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.445302162Z caller=remote_instance_store.go:51 user=320778 slug=omegaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-95dbw" t=2024-05-29T13:44:15.445253717Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:15.445222439Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:15.445193275Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.445172444Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-84k6w" t=2024-05-29T13:44:15.445171176Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.445041308Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.445039768Z caller=remote_instance_store.go:51 user=677132 slug=dragonflydbdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=701741 slug=thetradingpitproduction t=2024-05-29T13:44:15.366190563Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.20684ms
+ logger=ngalert.state.manager.persist user=747518 slug=dvevoli t=2024-05-29T13:44:15.445038082Z level=debug msg="Saving alert states" count=11 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=747518 slug=dvevoli instance="deployment_environment=testing, job=km-rechnungserstellung-be, topic=shared_qs_default_km_kmonline_rechnungsentwurf" t=2024-05-29T13:44:15.445020742Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.444916844Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=747518 slug=dvevoli instance="deployment_environment=testing, job=km-rechnungserstellung-be, topic=shared_qs_default_E-Rechnung_user-update_invoice-state" t=2024-05-29T13:44:15.444844379Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:15.444788478Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.444777903Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=642786 slug=sophoscomnsg t=2024-05-29T13:44:15.444731896Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=747518 slug=dvevoli instance="deployment_environment=testing, job=km-rechnungsentwurf-mdoconsumer-be, topic=shared_qa_mdo_omd_v2_client_replay" t=2024-05-29T13:44:15.443815609Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.444733737Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=700783 slug=gsgmedia t=2024-05-29T13:44:15.444668535Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=884866 slug=cnonumerique t=2024-05-29T13:44:15.444644775Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.444633875Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=884866 slug=cnonumerique version=67 fingerprint=ecc2b14801db8a4e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.370338543Z level=debug msg="Alert rule evaluated"
results="[{Instance:datasource_uid=fdhk917z41xj4a, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.37007771s EvaluationString:}]" duration=8.68808ms + logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:15.362355187Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.333032ms + logger=ngalert.state.manager user=902357 slug=tonvalidators instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.44437054Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=902357 slug=tonvalidators instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.444328969Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-7qrjx" t=2024-05-29T13:44:15.444215307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-7js48" t=2024-05-29T13:44:15.444191056Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-7js48" t=2024-05-29T13:44:15.444183266Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=357638 slug=usepower t=2024-05-29T13:44:15.444016143Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.011877ms + logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-54w44" t=2024-05-29T13:44:15.444053644Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-2h7xk" t=2024-05-29T13:44:15.443981832Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=762586 slug=enderian t=2024-05-29T13:44:15.443812216Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=762586 slug=enderian instance="datasource_uid=grafanacloud-logs, ref_id=A,B" t=2024-05-29T13:44:15.443794615Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=762586 slug=enderian t=2024-05-29T13:44:15.443749544Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=762586 slug=enderian version=4 fingerprint=8708da18a079cb80 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.443685633Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.443423392s EvaluationString:}]" duration=39.755244ms + level=debug ts=2024-05-29T13:44:15.443658726Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-2bc8q" t=2024-05-29T13:44:15.44339945Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.443306266Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=528849 slug=bitvavo instance="pod=exchange-rest-56fc7fc8d-26wpt" 
t=2024-05-29T13:44:15.443298168Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.443107328Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.442858384Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.442629833Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + Error parsing panelUID for alert annotationruleID1682dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.442376023Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=49.20072ms + level=debug ts=2024-05-29T13:44:15.442347269Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=747518 slug=dvevoli instance="deployment_environment=testing, job=km-rechnungsentwurf-mdoconsumer-be, topic=shared_qa_mdo_omd_data-environment_replay" t=2024-05-29T13:44:15.44233324Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.442201642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=747518 slug=dvevoli instance="deployment_environment=testing, job=km-rechnungsentwurf-mdoconsumer-be, topic=shared_qa_mdo_omd_data-environment" t=2024-05-29T13:44:15.442186777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.44217451Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.441836157Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=747518 slug=dvevoli instance="deployment_environment=testing, job=km-rechnungsentwurf-mdoconsumer-be, topic=shared_qa_mdo_omd_data-environment" t=2024-05-29T13:44:15.442064984Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.44202352Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=281034 slug=retailedge t=2024-05-29T13:44:15.441983974Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=281034 slug=retailedge instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.441965053Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.scheduler user=281034 slug=retailedge version=1 fingerprint=b9ca0d78edaa50c8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.44181327Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.441438413s EvaluationString:}]" duration=89.118665ms + level=debug ts=2024-05-29T13:44:15.44168298Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.441627922Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + 
+ logger=ngalert.state.manager.persist user=83647 slug=bidsolutions t=2024-05-29T13:44:15.441389398Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.946197ms
+ logger=ngalert.state.manager.persist user=614008 slug=surekha t=2024-05-29T13:44:15.441385554Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.186481ms
+ level=debug ts=2024-05-29T13:44:15.44115747Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.441235095Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.441162349Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user" t=2024-05-29T13:44:15.440978697Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal" t=2024-05-29T13:44:15.440894215Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.441046189Z caller=remote_instance_store.go:51 user=756904 slug=orbdatanfr msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.440939326Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:15.440839604Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:15.440805443Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice" t=2024-05-29T13:44:15.440817583Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq" t=2024-05-29T13:44:15.440791592Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle" t=2024-05-29T13:44:15.440722161Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle" t=2024-05-29T13:44:15.440708691Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user" t=2024-05-29T13:44:15.440666749Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ level=debug ts=2024-05-29T13:44:15.44067972Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user" t=2024-05-29T13:44:15.440650148Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:15.440446954Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:15.440346762Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager.persist user=438185 slug=nodeinfra t=2024-05-29T13:44:15.440303871Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.139023ms
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user" t=2024-05-29T13:44:15.44029162Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user" t=2024-05-29T13:44:15.4402744Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:15.44025239Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq" t=2024-05-29T13:44:15.440143357Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:15.440171878Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:15.440123896Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq" t=2024-05-29T13:44:15.440059065Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait" t=2024-05-29T13:44:15.440004424Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ level=debug ts=2024-05-29T13:44:15.439964223Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:15.439846729Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user" t=2024-05-29T13:44:15.439900521Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ level=debug ts=2024-05-29T13:44:15.439777798Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:15.439723937Z caller=remote_image_capturer.go:33 user=756904 slug=orbdatanfr rule_org_id=1 rule_uid=c04a68ca-be6f-4db9-b669-34c7b4878f53 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal" t=2024-05-29T13:44:15.439805559Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.439744991Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice" t=2024-05-29T13:44:15.439701916Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr instance="__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle" t=2024-05-29T13:44:15.439493491Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ level=debug ts=2024-05-29T13:44:15.43970328Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=756904 slug=orbdatanfr t=2024-05-29T13:44:15.43945054Z level=debug msg="State manager processing evaluation results" resultCount=32
+ logger=ngalert.scheduler user=756904 slug=orbdatanfr version=1 fingerprint=d3d4c2a050c5ff23 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.439088812Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc01a2c1348} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc01a2c12b8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc01a2c1300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437506253s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.35882789e+06 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.35882789e+06 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=iowait State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc01a2c14c8} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc01a2c13c8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc01a2c1480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437527015s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=11692.97 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=11692.97 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=irq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc01a2c15b0} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc01a2c1608} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc01a2c1558}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437534575s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=21958.2 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=21958.2 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc01a2c1688} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc01a2c16f0} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc01a2c1738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437541596s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=383.91 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=383.91 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=softirq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc01a2c1820} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc01a2c1868} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc01a2c17c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437547126s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=7622.28 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=7622.28 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc01a2c1900} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc01a2c1948} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc01a2c1990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437555146s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=system State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc01a2c1a30} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc01a2c1a78} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc01a2c1ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437559747s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=68329.03 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=68329.03 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc01a2c1bb8} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc01a2c1c10} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc01a2c1b60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437564647s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=401762.96 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=401762.96 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=0, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=idle State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc01a2c1d30} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc01a2c1ca0} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc01a2c1ce8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437569417s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.34497425e+06 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.34497425e+06 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc01a2c1e18} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc01a2c1e60} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc01a2c1dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437576158s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=13825.53 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=13825.53 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc01a2c1f48} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc01a2c1f90} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc01a2c1ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437581048s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=22779.74 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=22779.74 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=nice State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c54020} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c54068} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c540c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437587748s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=432.87 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=432.87 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c541f0} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c54150} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c541a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437592679s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=4225.35 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=4225.35 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=steal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c542b8} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c54310} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c54280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437598149s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=system State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c543a8} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c54420} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c54468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437602029s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=73618.36 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=73618.36 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c54588} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c544f8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c54550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.43760656s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=410350.47 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=410350.47 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=1, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=idle State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc019c54680} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc019c546c8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc019c54618}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.43761213s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.38637327e+06 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.38637327e+06 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=iowait State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc019c54758} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc019c547a0} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc019c547d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.43761758s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=13693.92 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=13693.92 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=irq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc019c54878} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc019c548c0} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc019c54918}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437622191s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=24500.02 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=24500.02 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=nice State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c549a8} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c54a00} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c54a48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437627651s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=279.5 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=279.5 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=softirq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c54b88} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c54ad8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c54b30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437631991s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=5029.64 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=5029.64 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=steal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c54c18} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c54c60} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c54ca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437642052s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=system State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c54d48} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c54d90} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c54dd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437649192s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=71085.96 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=71085.96 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c54e78} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c54ec0} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c54f18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437658203s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=364535.86 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=364535.86 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=2, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc019c54ff0} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc019c55038} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle Value:0xc019c54fa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437667123s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.31371829e+06 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=5.31371829e+06 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=idle} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=iowait State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc019c55148} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc019c550c8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=iowait Value:0xc019c55110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437676644s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=12907.52 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=12907.52 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=iowait} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc019c55278} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc019c551f8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq Value:0xc019c55240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437688435s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=22510.62 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=22510.62 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=irq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c55370} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c553c8} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice Value:0xc019c55328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437698335s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=418.2 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=418.2 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=nice} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=softirq State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c554f0} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c55460} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=softirq Value:0xc019c554a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437705886s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=3889.26 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=3889.26 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=softirq} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c555c8} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c55620} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal Value:0xc019c55580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437714136s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=steal} value=0 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=system State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c55770} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c556b0} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=system Value:0xc019c556f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437721507s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=72877.4 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=72877.4 ], [ var='C' labels={__name__=node_cpu_seconds_total,
agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=system} value=1 ]} {Instance:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c559f0} B:{Var:B Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c55950} C:{Var:C Labels:__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user Value:0xc019c559a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.437729577s EvaluationString:[ var='A' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=444130.84 ], [ var='B' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=444130.84 ], [ var='C' labels={__name__=node_cpu_seconds_total, agent_hostname=ovh-l-orb-p-ms01.esxi.orb, cpu=3, instance=orb-ms01, job=integrations/node_exporter, mode=user} value=1 ]}]" duration=12.353366ms + logger=ngalert.state.manager user=146728 slug=dgc instance="datasource_uid=WqVnnZtMk, ref_id=A" t=2024-05-29T13:44:15.439482873Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=146728 slug=dgc version=1 fingerprint=07b61c99a3312764 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.43930526Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=WqVnnZtMk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.438911173s EvaluationString:}]" duration=46.250982ms + level=debug ts=2024-05-29T13:44:15.438814437Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.438597788Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.438569846Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=707.35019ms + level=debug ts=2024-05-29T13:44:15.438548597Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.qira.stateChange" t=2024-05-29T13:44:15.438468736Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.438440993Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=806229 slug=simplisafe version=33 fingerprint=75edea2535d81029 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.438326454Z level=debug msg="Alert rule evaluated" results="[{Instance:queue=two.qira.stateChange State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:queue=two.qira.stateChange Value:0xc081566040} D:{Var:D Labels:queue=two.qira.stateChange Value:0xc081566050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.437599658s EvaluationString:[ var='A' labels={queue=two.qira.stateChange} value=0 ], [ var='D' labels={queue=two.qira.stateChange} value=0 ]}]" duration=18.864489ms + level=debug ts=2024-05-29T13:44:15.438284195Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.438197727Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.438183355Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.438048818Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437880737Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437866764Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437837579Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437800818Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437766682Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437700058Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437187498Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.437088674Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.436941718Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.4368326Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.436506399Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.436395768Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + Error parsing panelUID for alert annotation ruleID=2707 dash= actual error=strconv.ParseInt: parsing "": invalid syntax + logger=ngalert.scheduler user=155740 slug=routific version=4 fingerprint=66ea28038af2971a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.436303308Z level=debug msg="Alert rule evaluated" results="[{Instance:pod=planning-service-fd8cf8d8b-cf6kd State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:pod=planning-service-fd8cf8d8b-cf6kd Value:0xc06c959a10} C:{Var:C Labels:pod=planning-service-fd8cf8d8b-cf6kd Value:0xc06c959a20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.435893134s EvaluationString:[ var='B' labels={pod=planning-service-fd8cf8d8b-cf6kd} value=0.09747116556025336 ], [ var='C' labels={pod=planning-service-fd8cf8d8b-cf6kd} value=0 ]} {Instance:pod=planning-service-fd8cf8d8b-psmbn State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:pod=planning-service-fd8cf8d8b-psmbn Value:0xc06c959a40} C:{Var:C Labels:pod=planning-service-fd8cf8d8b-psmbn Value:0xc06c959a50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC
EvaluationDuration:5.435906501s EvaluationString:[ var='B' labels={pod=planning-service-fd8cf8d8b-psmbn} value=0.06492900599847214 ], [ var='C' labels={pod=planning-service-fd8cf8d8b-psmbn} value=0 ]} {Instance:pod=planning-service-fd8cf8d8b-tkrlf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:pod=planning-service-fd8cf8d8b-tkrlf Value:0xc06c959a70} C:{Var:C Labels:pod=planning-service-fd8cf8d8b-tkrlf Value:0xc06c959a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.435912274s EvaluationString:[ var='B' labels={pod=planning-service-fd8cf8d8b-tkrlf} value=0.09217634982663328 ], [ var='C' labels={pod=planning-service-fd8cf8d8b-tkrlf} value=0 ]} {Instance:pod=planning-service-fd8cf8d8b-wpftw State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:pod=planning-service-fd8cf8d8b-wpftw Value:0xc06c959aa0} C:{Var:C Labels:pod=planning-service-fd8cf8d8b-wpftw Value:0xc06c959ab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.435919699s EvaluationString:[ var='B' labels={pod=planning-service-fd8cf8d8b-wpftw} value=0.0646842589170953 ], [ var='C' labels={pod=planning-service-fd8cf8d8b-wpftw} value=0 ]}]" duration=32.029811ms + logger=ngalert.state.manager.persist user=849191 slug=spintech t=2024-05-29T13:44:15.435910088Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.323386ms + level=debug ts=2024-05-29T13:44:15.43588591Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.435642794Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.43560698Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.43559514Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=178698 slug=avantpage t=2024-05-29T13:44:15.435545318Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.435359021Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.435121192Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.434916046Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.434928714Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.434759957Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.434590756Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:15.434576154Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.434416063Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.historian backend=loki user=517562 slug=microstrategytest t=2024-05-29T13:44:15.434338905Z level=debug msg="Done saving alert state history batch" + level=debug 
ts=2024-05-29T13:44:15.433916217Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.433851709Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.433780486Z caller=remote_instance_store.go:51 user=172772 slug=ppbtradingtribe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.43368137Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.433782857Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.433434175Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:15.433475069Z caller=remote_alert_sender.go:94 user=835651 slug=kirogoto host=kirogoto-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.183.17:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adbybvdalfzswb alerts=1 + level=debug ts=2024-05-29T13:44:15.433348705Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.433148254Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.433041104Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=297794 slug=leanix instance="datasource_uid=grafanacloud-prom, ref_id=A,C" t=2024-05-29T13:44:15.433021312Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=297794 slug=leanix t=2024-05-29T13:44:15.432998993Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.432954477Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.432972139Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.432687205Z caller=remote_instance_store.go:51 user=302415 slug=mgbcoreinfraprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=302415 slug=mgbcoreinfraprod instance="datasource_uid=grafanacloud-usage, ref_id=A" t=2024-05-29T13:44:15.432628177Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.432598595Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:15.432614369Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:15.432598394Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=489921 slug=statuscake t=2024-05-29T13:44:15.432546796Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=302415 slug=mgbcoreinfraprod version=6 fingerprint=348db194bb98cec7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.432507317Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-usage, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:5.432255992s EvaluationString:}]" duration=154.646745ms + level=info ts=2024-05-29T13:44:15.432513331Z caller=remote_alert_sender.go:94 user=517562 slug=microstrategytest host=microstrategytest-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.221.20:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdbkdcj5ek9oge alerts=22 + logger=ngalert.scheduler user=517562 slug=microstrategytest t=2024-05-29T13:44:15.432541341Z level=warn msg="Tick dropped because alert rule evaluation is too slow" rule_uid=fdbkdcj5ek9oge org_id=1 time=2024-05-29T13:44:00Z + logger=ngalert.scheduler user=489921 slug=statuscake version=122 fingerprint=e8e6a026f6a91a78 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.432469708Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Last:{Var:Last Labels: Value:0xc044c51c38} Value:{Var:Value Labels: Value:0xc044c51c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.432153612s EvaluationString:[ var='Last' labels={} value=0 ], [ var='Value' labels={} value=0 ]}]" duration=5.274333ms + level=debug ts=2024-05-29T13:44:15.432468282Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=114516 slug=heliumdashboard t=2024-05-29T13:44:15.4323657Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.56027ms + level=debug ts=2024-05-29T13:44:15.432265476Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.432130571Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.4322079Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.432041542Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.432036115Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.432078439Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance" + logger=ngalert.state.historian backend=loki user=517562 slug=microstrategytest t=2024-05-29T13:44:15.432009535Z level=debug msg="Alert state changed creating annotation" newState=Pending oldState=Normal + level=debug ts=2024-05-29T13:44:15.4319778Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.historian backend=loki user=517562 slug=microstrategytest t=2024-05-29T13:44:15.431967151Z level=debug msg="Alert state changed creating annotation" newState=Pending oldState=Normal + logger=ngalert.state.historian backend=loki user=517562 slug=microstrategytest t=2024-05-29T13:44:15.431811258Z level=debug msg="Alert state changed creating annotation" newState=Alerting oldState=Pending + logger=ngalert.state.historian backend=loki user=517562 slug=microstrategytest t=2024-05-29T13:44:15.431767723Z level=debug msg="Alert state changed creating annotation" newState=Alerting oldState=Pending + logger=ngalert.state.manager user=111839 slug=last9 instance= t=2024-05-29T13:44:15.431588793Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=111839 slug=last9 t=2024-05-29T13:44:15.431547724Z 
level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}{{ $cluster }}': error parsing template __alert_vmcluster slow inserts - PROD: template: __alert_vmcluster slow inserts - PROD:1: undefined variable \"$cluster\"" + logger=ngalert.state.manager.persist user=633381 slug=arascorp t=2024-05-29T13:44:15.431368003Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.688636ms + logger=ngalert.state.manager.persist user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:15.431318921Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.006785ms + level=debug ts=2024-05-29T13:44:15.430948999Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=insurance-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=insurance-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development" t=2024-05-29T13:44:15.430626373Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.430530381Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=101297 slug=techopsservices t=2024-05-29T13:44:15.43060784Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=101297 slug=techopsservices instance="instance=ip-172-25-30-101.eu-central-1.compute.internal" t=2024-05-29T13:44:15.43056325Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.430587298Z caller=remote_instance_store.go:51 user=419587 slug=greenpass msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=101297 slug=techopsservices instance="instance=ip-172-25-30-101.eu-central-1.compute.internal" t=2024-05-29T13:44:15.430552166Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=698963 slug=lemonade version=2 fingerprint=9f310b0fea969504 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.430393751Z level=debug msg="Alert rule evaluated" results="[{Instance:app=insurance-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=insurance-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=insurance-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=insurance-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc00434c6f0} THRESHOLD:{Var:THRESHOLD Labels:app=insurance-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=insurance-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc00434c7a8}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.429478882s EvaluationString:[ var='QUERY' labels={app=insurance-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=insurance-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=0 ], [ var='THRESHOLD' labels={app=insurance-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=insurance-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=0 ]}]" duration=50.306046ms + logger=ngalert.state.manager user=419587 slug=greenpass instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.430491526Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=419587 slug=greenpass version=61 fingerprint=63244e70f47fdd7b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.430353163Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.43004586s EvaluationString:}]" duration=14.138831ms + level=debug ts=2024-05-29T13:44:15.430320238Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=677132 slug=dragonflydbdev instance="database_id=attos-dev:stagingv2-control-plane-db-replica" t=2024-05-29T13:44:15.429626048Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.429682553Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.429579683Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=677132 slug=dragonflydbdev instance="database_id=attos-dev:stagingv2-control-plane-db" t=2024-05-29T13:44:15.429600017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=677132 slug=dragonflydbdev t=2024-05-29T13:44:15.429561467Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.scheduler user=677132 slug=dragonflydbdev version=11 fingerprint=515bbe596e05ed86 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.429479195Z level=debug msg="Alert rule evaluated" results="[{Instance:database_id=attos-dev:stagingv2-control-plane-db State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:database_id=attos-dev:stagingv2-control-plane-db Value:0xc01f38b410} B:{Var:B Labels:database_id=attos-dev:stagingv2-control-plane-db Value:0xc01f38b3e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.429053426s EvaluationString:[ var='A' labels={database_id=attos-dev:stagingv2-control-plane-db} value=0.05033598949071376 ], [ var='B' labels={database_id=attos-dev:stagingv2-control-plane-db} value=0 ]} {Instance:database_id=attos-dev:stagingv2-control-plane-db-replica State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:database_id=attos-dev:stagingv2-control-plane-db-replica Value:0xc01f38b448} B:{Var:B Labels:database_id=attos-dev:stagingv2-control-plane-db-replica 
Value:0xc01f38b440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.429070797s EvaluationString:[ var='A' labels={database_id=attos-dev:stagingv2-control-plane-db-replica} value=0.05023930468357646 ], [ var='B' labels={database_id=attos-dev:stagingv2-control-plane-db-replica} value=0 ]}]" duration=249.384716ms + logger=ngalert.state.manager.persist user=849729 slug=medopsimscare t=2024-05-29T13:44:15.42949189Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=849729 slug=medopsimscare t=2024-05-29T13:44:15.429434041Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.429412894Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=849729 slug=medopsimscare version=41 fingerprint=6693c04e666760f4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.42936234Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.42890639s EvaluationString:}]" duration=14.319378ms + logger=ngalert.state.manager user=349229 slug=kropyva instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.429302442Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:15.429153397Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.429065544Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.428991066Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.428832352Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.428457422Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=HONG KONG Query" t=2024-05-29T13:44:15.428443046Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=HONG KONG Query" t=2024-05-29T13:44:15.428427934Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.428338111Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=214309 slug=spenmo t=2024-05-29T13:44:15.428036632Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.301147ms + logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:15.427237771Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:15.42719921Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.427158386Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:15.427080529Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.427005706Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.426962764Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance" + logger=ngalert.state.historian backend=loki user=845543 slug=deliveryhero t=2024-05-29T13:44:15.426631302Z level=debug msg="Done saving alert state history batch" + level=debug ts=2024-05-29T13:44:15.426359082Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-card-prd" t=2024-05-29T13:44:15.425958622Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.426007308Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:15.425927413Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + level=debug ts=2024-05-29T13:44:15.425597589Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.425252642Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.425285629Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.425136627Z caller=remote_instance_store.go:51 user=265692 slug=beekeeper msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.425138607Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.42489851Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.4248061Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.424623599Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.298219ms + level=debug ts=2024-05-29T13:44:15.424545426Z caller=remote_instance_store.go:51 user=328778 slug=teemuskog msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.424483016Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=328778 slug=teemuskog instance= t=2024-05-29T13:44:15.424434968Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=328778 slug=teemuskog t=2024-05-29T13:44:15.424372087Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:15.424102442Z caller=remote_alert_sender.go:94 user=794342 slug=ziyedbe host=ziyedbe-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.220.153:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c7b1e254-2f09-40a7-ae2e-4c36da431ec8 alerts=1 + logger=ngalert.state.manager.persist user=794342 slug=ziyedbe t=2024-05-29T13:44:15.424037151Z level=debug 
msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.445689ms + logger=ngalert.state.manager.persist user=846179 slug=dfdsdemo t=2024-05-29T13:44:15.42401699Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.108221ms + level=debug ts=2024-05-29T13:44:15.423991726Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.423827277Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.208.13:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ebbcca70-d703-43b2-93e8-7bd97f196376 alerts=1 + level=info ts=2024-05-29T13:44:15.423799462Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.176.29:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ebbcca70-d703-43b2-93e8-7bd97f196376 alerts=1 + logger=ngalert.state.historian backend=loki user=845543 slug=deliveryhero t=2024-05-29T13:44:15.4236738Z level=debug msg="Alert state changed creating annotation" newState=Normal oldState=Pending + logger=ngalert.scheduler user=245291 slug=pismo version=5 fingerprint=1bbdfffd4a24493e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.423460538Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.423162511s EvaluationString:}]" duration=249.794994ms + level=debug ts=2024-05-29T13:44:15.423395572Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.423387527Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.423258845Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.423249971Z caller=remote_instance_store.go:51 user=614008 slug=surekha msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.423199777Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.423169319Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=614008 slug=surekha instance= t=2024-05-29T13:44:15.423170032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=614008 slug=surekha t=2024-05-29T13:44:15.423125656Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.422886506Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.422720237Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.422640002Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.422536746Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.422328988Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.421881154Z 
caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=835651 slug=kirogoto version=4 fingerprint=e1752a36ae079636 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.421854908Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.421426063s EvaluationString:}]" duration=22.379267ms + level=debug ts=2024-05-29T13:44:15.42175612Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.421695954Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.421693791Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=192304 slug=eivjo instance= t=2024-05-29T13:44:15.42154558Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.421317845Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:15.421259161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:15.421235734Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.08114ms + level=debug ts=2024-05-29T13:44:15.421117905Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.42089771Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:15.420820048Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=374423 slug=bitburst instance="partition=0, topic=event.survey.screenout.0" t=2024-05-29T13:44:15.420802021Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic instance="db=analytics-db, host=pg1-sjc, team=SRE" t=2024-05-29T13:44:15.420727434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=374423 slug=bitburst instance="partition=0, topic=event.survey.complete.0" t=2024-05-29T13:44:15.420700721Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.420356298Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.420034341Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic instance="db=analytics-backup, host=pg1-sjc, team=SRE" t=2024-05-29T13:44:15.420074368Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.419859577Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:15.419865429Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.41931464Z caller=remote_instance_store.go:51 user=540828 
slug=finfoprod153 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=New York, cluster=ZenMate-Free, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.59.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=newyork-s451, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.414672938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Frankfurt, cluster=ZenMate-Free, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=87.249.132.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s445, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.413802693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Frankfurt, cluster=ZenMate-Free, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=87.249.132.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s445, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.413793331Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.419411363Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.52, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s483, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.413144181Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.413117479Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.36, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s482, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.412765976Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.216, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s490, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.412579553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.216, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s490, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.412568776Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.216, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s490, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.412349311Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.416412844Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.419162436Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.412301407Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.418622728Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.418615372Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:15.41842615Z level=debug msg="State manager processing evaluation results" resultCount=4 + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.418426803Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.418430097Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=486972 slug=payretailers t=2024-05-29T13:44:15.418327422Z level=debug msg="State manager processing 
evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.418106587Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=308298 slug=xbto t=2024-05-29T13:44:15.418334167Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.418224101Z caller=remote_instance_store.go:51 user=497177 slug=zoldmezo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=308298 slug=xbto instance= t=2024-05-29T13:44:15.418288157Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.417684072Z caller=remote_instance_store.go:51 user=201790 slug=veedmo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.417657017Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.417594392Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:15.41745131Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.417378671Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.417292409Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.417287081Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:15.417309768Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev instance="datasource_uid=Rz3MTGbVz, ref_id=A" t=2024-05-29T13:44:15.417267148Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.41679626Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=214309 slug=spenmo instance= t=2024-05-29T13:44:15.416704127Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=214309 slug=spenmo t=2024-05-29T13:44:15.416678517Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:15.416258092Z caller=remote_alert_sender.go:94 user=633381 slug=arascorp host=arascorp-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.229.151:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ediw3meja8kjkc alerts=1 + level=debug ts=2024-05-29T13:44:15.416091023Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.415744441Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.416041376Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.415843877Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.415719618Z level=debug msg="State manager processing evaluation results" resultCount=1 + 
logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=b04778ab3ff04a72 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.415685191Z level=error msg="Failed to evaluate rule" error="failed to build query 'Errors': data source not found" duration=3.627664ms + level=error ts=2024-05-29T13:44:15.415665356Z caller=remote_rule_evaluator.go:110 user=250150 slug=bizagi msg="remote evaluate failed" code=Code(422) err="failed to build query 'Errors': data source not found" + level=debug ts=2024-05-29T13:44:15.410636774Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.195, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s489, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.412135391Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.412104221Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.195, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s489, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.411899702Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.174, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s488, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.411708466Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.174, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s488, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.411515116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.411489077Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.411318332Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.14.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s485, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.41101031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.14.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s485, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.41100059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.410807504Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.14.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s484, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.410626995Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.41059807Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.14.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s484, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.41045808Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=ZenMate, city=Bucharest, cluster=ZenMate-Free, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.14.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s484, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.410445524Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.415088611Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.109058ms + level=debug ts=2024-05-29T13:44:15.414439408Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.414634843Z caller=remote_instance_store.go:51 user=794342 slug=ziyedbe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.414356149Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=700783 slug=gsgmedia t=2024-05-29T13:44:15.414297005Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=700783 slug=gsgmedia t=2024-05-29T13:44:15.414185402Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:15.414133398Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.086935ms + level=debug ts=2024-05-29T13:44:15.414069383Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.414001999Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472647 slug=planet instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.413587541Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.413593865Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=340750 slug=aptoslabs instance= t=2024-05-29T13:44:15.413543204Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=340750 slug=aptoslabs version=4 fingerprint=3aedec02c894db9b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.413427992Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.41302521s EvaluationString:}]" duration=86.120531ms + level=debug ts=2024-05-29T13:44:15.413190045Z 
caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.413097878Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.413087122Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.17277ms + level=debug ts=2024-05-29T13:44:15.412998695Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.41297322Z caller=remote_instance_store.go:51 user=846179 slug=dfdsdemo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.412916028Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=307381 slug=kambitaskforce version=9 fingerprint=07174e268be51a3e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.412823363Z level=debug msg="Alert rule evaluated" results="[{Instance:pool=es-postgres State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pool=es-postgres Value:0xc1414fb510} B:{Var:B Labels:pool=es-postgres Value:0xc1414fb530} C:{Var:C Labels:pool=es-postgres Value:0xc1414fb4f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.412391815s EvaluationString:[ var='A' labels={pool=es-postgres} value=0.004194296 ], [ var='B' labels={pool=es-postgres} value=0.004194296 ], [ var='C' labels={pool=es-postgres} value=0 ]}]" duration=40.98929ms + logger=ngalert.state.manager.persist user=846179 slug=dfdsdemo t=2024-05-29T13:44:15.412904518Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.412840277Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.412842615Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.01256ms + level=debug ts=2024-05-29T13:44:15.412770402Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.412600028Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.412558252Z caller=remote_image_capturer.go:54 user=633381 slug=arascorp rule_org_id=1 rule_uid=cdiw3meinrhfkb dashboard=aa7da355-9767-4265-b649-9ed12476de6b panel=68 msg="rendering alert image with grafana" + level=debug ts=2024-05-29T13:44:15.41253393Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.412453056Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.412516433Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.412467536Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=633381 slug=arascorp version=1 fingerprint=09c9bcf9d34242c5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.412333321Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc00eaa6058} B:{Var:B Labels: Value:0xc00eaa6060} C:{Var:C Labels: 
Value:0xc00eaa6068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.411909798s EvaluationString:[ var='A' labels={} value=13 ], [ var='B' labels={} value=13 ], [ var='C' labels={} value=1 ]}]" duration=17.381761ms + level=debug ts=2024-05-29T13:44:15.412078643Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.411366957Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.41051344Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.409921746Z caller=remote_image_capturer.go:54 user=201644 slug=thoughtspot rule_org_id=1 rule_uid=K99imYKVk dashboard=s7TH-EUW1 panel=10 msg="rendering alert image with grafana" + level=debug ts=2024-05-29T13:44:15.410198188Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=201644 slug=thoughtspot instance= t=2024-05-29T13:44:15.409796969Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + level=debug ts=2024-05-29T13:44:15.408925158Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.408916524Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.40959953Z caller=remote_instance_store.go:51 user=337951 slug=pawapay msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:15.409539285Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.409309199Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.409219617Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=350551 slug=loopme instance= t=2024-05-29T13:44:15.40908985Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.409069971Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:15.40870202Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:15.40868992Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.408592257Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.408546023Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=612525 slug=adleyeview version=158 fingerprint=f43b30bb487ac2d9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.408448705Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc00ca811c0} D:{Var:D Labels: Value:0xc00ca811c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.400407153s EvaluationString:[ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=18.032823ms + logger=ngalert.state.manager user=83647 
slug=bidsolutions instance= t=2024-05-29T13:44:15.408429434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=83647 slug=bidsolutions version=1 fingerprint=44a366d6b729a215 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.408278504Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.408062411s EvaluationString:}]" duration=45.228983ms + level=debug ts=2024-05-29T13:44:15.408341091Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.408282071Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.408219695Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:15.408228126Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.408138569Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.408118465Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=307381 slug=kambitaskforce version=9 fingerprint=c3a56a34580dcb69 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.407996906Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.40362976s EvaluationString:}]" duration=36.346297ms + logger=ngalert.state.manager user=114516 slug=heliumdashboard instance="Env=mainnet, Role=verifier, Stack=iot, __name__=iot_verifier_num_beacons, agent_hostname=mainnet-iot-verifier0-oregon, instance=mainnet-iot-verifier0-oregon:19001, job=iot_verifier_metrics" t=2024-05-29T13:44:15.407777653Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=806229 slug=simplisafe version=33 fingerprint=db170139e233b8c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.407301922Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc004b2c088} C:{Var:C Labels: Value:0xc004b2c090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.406459614s EvaluationString:[ var='A' labels={} value=467486.9344978166 ], [ var='C' labels={} value=0 ]}]" duration=35.786352ms + level=debug ts=2024-05-29T13:44:15.407081676Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.40682918Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.406755398Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.406569888Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.406637008Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, 
city=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=89.149.38.1:9998, ip=89.149.38.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=zurich405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.406004457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=212.102.37.166:9998, ip=212.102.37.166, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/swiss.crt, role=vpn, server=zurich403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.405838392Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=212.102.37.166:9998, ip=212.102.37.166, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/swiss.crt, role=vpn, server=zurich403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.405824331Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=212.102.37.134:9998, ip=212.102.37.134, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/swiss.crt, role=vpn, server=zurich404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.405450715Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.406480531Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=156.146.62.193:9998, ip=156.146.62.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/swiss.crt, role=vpn, server=zurich407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.404612672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=156.146.62.129:9998, ip=156.146.62.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/swiss.crt, role=vpn, server=zurich406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.404231807Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.404207489Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zurich, country=Switzerland, datacenter=Altushost, environment=production, instance=79.142.79.59:9998, ip=79.142.79.59, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=zurich412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.403715687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zurich, country=Switzerland, datacenter=Altushost, environment=production, instance=79.142.79.27:9998, ip=79.142.79.27, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/swiss.crt, role=vpn, server=zurich411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.403521328Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.403288129Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=154.47.29.129:9998, ip=154.47.29.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/zagreb.crt, role=vpn, server=zagreb403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.403142689Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=154.47.29.129:9998, ip=154.47.29.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/zagreb.crt, role=vpn, server=zagreb403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.403130514Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.403105767Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.406252721Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=149.102.247.225:9998, ip=149.102.247.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/zagreb.crt, role=vpn, server=zagreb404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.402817182Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=149.102.247.225:9998, ip=149.102.247.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/zagreb.crt, role=vpn, server=zagreb404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.402805748Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=149.102.247.225:9998, ip=149.102.247.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=zagreb404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.40266421Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.406112561Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.402446594Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.406155964Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.405142583Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=873368 slug=euid t=2024-05-29T13:44:15.405057845Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.548221ms + logger=ngalert.state.manager user=413775 slug=akoppula instance= t=2024-05-29T13:44:15.404963133Z level=warn msg="Failed to take an image" dashboard=r7ga_94Vz panel=2 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.404930881Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.404860796Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=413775 slug=akoppula instance= t=2024-05-29T13:44:15.40427643Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-854b49d677-6j4dc" t=2024-05-29T13:44:15.403884562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-854b49d677-6j4dc" t=2024-05-29T13:44:15.403873275Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-764d8bd676-dbqw4" t=2024-05-29T13:44:15.40382417Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-74b66779d8-h2qdx" t=2024-05-29T13:44:15.403800501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-74b66779d8-h2qdx" t=2024-05-29T13:44:15.403790687Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.403710485Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-64bbfc8dff-fqkvt" t=2024-05-29T13:44:15.403718878Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-6469c468cf-h4v9q" t=2024-05-29T13:44:15.40368948Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.40356852Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=263582 slug=prestowillis instance="pod=wss-565fb9ccdd-wlnn2" t=2024-05-29T13:44:15.403531094Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=263582 slug=prestowillis t=2024-05-29T13:44:15.403466639Z level=debug msg="State manager processing evaluation results" resultCount=12 + level=debug ts=2024-05-29T13:44:15.403434916Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.403315182Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=265692 
slug=beekeeper instance="bundle-id=beekeeper.Hive" t=2024-05-29T13:44:15.402958989Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.402888872Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.402621167Z caller=remote_instance_store.go:51 user=355252 slug=bumper msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.coop" t=2024-05-29T13:44:15.402567237Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.flaggerforce" t=2024-05-29T13:44:15.402537872Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.flaggerforce.abm" t=2024-05-29T13:44:15.402510484Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.heathrow" t=2024-05-29T13:44:15.402478554Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.heathrow" t=2024-05-29T13:44:15.402467297Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wilmington, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.160:9998, ip=84.239.43.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-wilmington.crt, role=vpn, server=wilmington402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.401968594Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wilmington, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.160:9998, ip=84.239.43.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-wilmington.crt, role=vpn, server=wilmington402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.40193385Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.heathrow.abm" t=2024-05-29T13:44:15.402438656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.40191086Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wilmington, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.160:9998, ip=84.239.43.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=wilmington402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.401788958Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.401748261Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wilmington, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.129:9998, ip=84.239.43.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-wilmington.crt, role=vpn, server=wilmington401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.401618504Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.402496756Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wilmington, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.129:9998, ip=84.239.43.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=wilmington401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.40141319Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wilmington, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.129:9998, ip=84.239.43.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=wilmington401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.401401699Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.401198366Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.401009922Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wellington, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=43.250.207.82:9998, ip=43.250.207.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=newzealand403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.400876267Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wellington, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=43.250.207.82:9998, ip=43.250.207.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nz.crt, role=streaming-optimized, server=newzealand403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.400713217Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wellington, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=202.60.86.66:9998, ip=202.60.86.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=newzealand405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.400530485Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wellington, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=202.60.86.66:9998, ip=202.60.86.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nz.crt, role=streaming-optimized, server=newzealand405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.400359672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=458034 slug=juremy t=2024-05-29T13:44:15.402382783Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wellington, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=202.60.86.2:9998, ip=202.60.86.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=newzealand404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.400145883Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=458034 slug=juremy instance="cluster=prod-eu-west-0, id=616839, org_id=729425" t=2024-05-29T13:44:15.402360236Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.400111162Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.mohg" t=2024-05-29T13:44:15.402316951Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Wellington, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=202.60.86.2:9998, ip=202.60.86.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nz.crt, role=streaming-optimized, server=newzealand404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.399981737Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=458034 slug=juremy instance="cluster=prod-eu-west-0, id=616839, org_id=729425" t=2024-05-29T13:44:15.40234488Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.399957263Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.mohg" t=2024-05-29T13:44:15.402291104Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.399790225Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=458034 slug=juremy t=2024-05-29T13:44:15.402286732Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.98:9998, ip=181.214.94.98, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington435, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.399665975Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=ch.beekeeper.partnersgroup" t=2024-05-29T13:44:15.402261372Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.66:9998, ip=181.214.94.66, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.399471169Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.399283524Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.34:9998, ip=181.214.94.34, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.399099386Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=com.Alkermes.AlksConnect" t=2024-05-29T13:44:15.40217527Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.399386429Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=265692 slug=beekeeper instance="bundle-id=de.beekeeper.mannhummel" t=2024-05-29T13:44:15.402128002Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.402085646Z caller=remote_instance_store.go:51 user=698103 slug=vericast msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:15.402036293Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.401986138Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=698103 slug=vericast version=25 fingerprint=882e376a71a3b5d7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.401912767Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.401540516s EvaluationString:}]" duration=131.406722ms + level=debug ts=2024-05-29T13:44:15.40167574Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:15.401244545Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.219211ms + logger=ngalert.state.manager.persist user=530405 slug=zetetic t=2024-05-29T13:44:15.40125459Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.865695ms + level=debug ts=2024-05-29T13:44:15.400998464Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance="datasource_uid=9nh9AkTnz, ref_id=A" t=2024-05-29T13:44:15.401075272Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance="datasource_uid=9nh9AkTnz, ref_id=A" t=2024-05-29T13:44:15.401057601Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.401042601Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=937416 slug=cambridgeuniversitypress version=2 fingerprint=f40d38b9ecd9b2c5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.400956279Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=9nh9AkTnz, ref_id=A State:NoData Error: Results:map[] Values:map[] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.400663667s EvaluationString:}]" duration=5.256785ms + level=debug ts=2024-05-29T13:44:15.400976141Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=18335 slug=semaphore t=2024-05-29T13:44:15.400884484Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=18335 slug=semaphore instance= t=2024-05-29T13:44:15.400866186Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.400527602Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.400492924Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:15.400482863Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="datasource_uid=zxr_3eR4z, ref_id=A" t=2024-05-29T13:44:15.400465297Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="datasource_uid=zxr_3eR4z, ref_id=A" t=2024-05-29T13:44:15.400444618Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle t=2024-05-29T13:44:15.400429389Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.399928631Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.399828089Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.399774836Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.399726957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.399695164Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.399722576Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:59:00Z next_ends_at=2024-05-29T14:04:00Z + logger=ngalert.state.manager user=190917 slug=d1cx t=2024-05-29T13:44:15.399679206Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.399566153Z caller=remote_instance_store.go:51 user=726011 slug=oneqrew msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=142072 slug=w2z t=2024-05-29T13:44:15.398959368Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.68248ms + logger=ngalert.state.manager.persist user=111839 slug=last9 t=2024-05-29T13:44:15.399537162Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.39938528Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=111839 slug=last9 t=2024-05-29T13:44:15.399433276Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := 
.Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}{{ $cluster }}': error parsing template __alert_vmcluster CPU - PROD: template: __alert_vmcluster CPU - PROD:1: undefined variable \"$cluster\"" + logger=ngalert.state.manager.persist user=75789 slug=mysign t=2024-05-29T13:44:15.399381488Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=75789 slug=mysign t=2024-05-29T13:44:15.399327239Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=111839 slug=last9 t=2024-05-29T13:44:15.399307801Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.399227426Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:15.398772264Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=sandbox-v2, Consumer Group=ledger-inst-consumer-v1, Topic=processing.instances.v1" t=2024-05-29T13:44:15.398754841Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=sandbox-v2, Consumer Group=ledger-inst-consumer-v1, Topic=processing.instances.v1" t=2024-05-29T13:44:15.398689637Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.224:9998, ip=181.214.94.224, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington439, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.397934766Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.193:9998, ip=181.214.94.193, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.397410615Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=843304 slug=ppcgroup t=2024-05-29T13:44:15.398500218Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.205417ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.162:9998, ip=181.214.94.162, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.397255765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.162:9998, ip=181.214.94.162, 
is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.39724581Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.162:9998, ip=181.214.94.162, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.397071671Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.162:9998, ip=181.214.94.162, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.39706165Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.130:9998, ip=181.214.94.130, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.396898979Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.130:9998, ip=181.214.94.130, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.396886939Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=181.214.94.130:9998, ip=181.214.94.130, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.396722701Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.398458296Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.98:9998, ip=102.129.235.98, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington443, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.396545939Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.98:9998, ip=102.129.235.98, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington443, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.396362685Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.396125619Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.66:9998, ip=102.129.235.66, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington442, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.396004121Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.395774875Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.2:9998, ip=102.129.235.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.395391415Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.2:9998, ip=102.129.235.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.395229404Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.395201193Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.398273824Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395521428Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.398116135Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.705517ms
+ level=debug ts=2024-05-29T13:44:15.398011282Z caller=remote_instance_store.go:51 user=743579 slug=neotax msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:15.397994193Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.031876ms
+ level=debug ts=2024-05-29T13:44:15.397864966Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=824501 slug=bendingspoons t=2024-05-29T13:44:15.397074048Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.373658ms
+ level=debug ts=2024-05-29T13:44:15.397799456Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.397724282Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.397597969Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=543654 slug=jobcloudprogrammaticprod version=3 fingerprint=204e9ecb75b1d9ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.397571462Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.396080562s EvaluationString:}]" duration=19.659191ms
+ level=debug ts=2024-05-29T13:44:15.397411749Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.394440922Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.193:9998, ip=102.129.235.193, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington446, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.394295779Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.394267508Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.394057409Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.162:9998, ip=102.129.235.162, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington445, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.393669821Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.162:9998, ip=102.129.235.162, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington445, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.393657524Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.130:9998, ip=102.129.235.130, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington444, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.393459792Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.130:9998, ip=102.129.235.130, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington444, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.393247994Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=PIA, environment=production, instance=102.129.235.130:9998, ip=102.129.235.130, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington444, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.393236866Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=DataPacket, environment=production, instance=149.18.24.191:9998, ip=149.18.24.191, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington451, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.393036432Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=DataPacket, environment=production, instance=149.18.24.191:9998, ip=149.18.24.191, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington451, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.39287176Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.397152232Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.397195119Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.397130719Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:15.39708645Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.39678607Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.396561092Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.396567663Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.396470668Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.744169ms
+ logger=ngalert.state.manager.persist user=873368 slug=euid t=2024-05-29T13:44:15.396505627Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=873368 slug=euid instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:15.396482939Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=873368 slug=euid instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:15.396472324Z level=debug msg="Setting next state" handler=resultNoData
+ level=info ts=2024-05-29T13:44:15.396416519Z caller=remote_alert_sender.go:94 user=116479 slug=tomtomnv host=tomtomnv-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.237.244:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=8mhcI4Lnk alerts=1
+ level=debug ts=2024-05-29T13:44:15.396331275Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:15.396374588Z caller=remote_alert_sender.go:94 user=116479 slug=tomtomnv host=tomtomnv-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.250.42:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=8mhcI4Lnk alerts=1
+ level=info ts=2024-05-29T13:44:15.396224756Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.176.29:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=df81ec34-fc30-44b4-a95c-a50aa26c74f4 alerts=1
+ level=debug ts=2024-05-29T13:44:15.396208135Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.396170743Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.396101878Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.396013053Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.396066946Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395998563Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.396022165Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.395948831Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=237629 slug=ocrolus instance="datasource_uid=grafanacloud-prom, ref_id=A,D" t=2024-05-29T13:44:15.395975196Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=237629 slug=ocrolus instance="datasource_uid=grafanacloud-prom, ref_id=A,D" t=2024-05-29T13:44:15.39596363Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=237629 slug=ocrolus t=2024-05-29T13:44:15.39594539Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=9c85e5fd3e8b89c8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.395871343Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.39566829s EvaluationString:}]" duration=164.923997ms
+ logger=ngalert.state.manager user=201790 slug=veedmo instance="host=host2.veedmo.com, interface=, url=" t=2024-05-29T13:44:15.39584373Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=201790 slug=veedmo instance="host=host1.veedmo.com, interface=, url=" t=2024-05-29T13:44:15.395808578Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.395637668Z caller=remote_instance_store.go:51 user=436633 slug=swirldslabsproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395511838Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395552238Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395452975Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395404565Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395263162Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.395025282Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=user-insights, group=blox, humio=logs, instance=10.41.22.4:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=user-insights-6999b9bf5-8t5wd, path=/., pod_template_hash=6999b9bf5" t=2024-05-29T13:44:15.394927905Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=60603 slug=avalaratax t=2024-05-29T13:44:15.394800421Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=70.415892ms
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=user-engagement, group=blox, humio=logs, instance=10.41.2.6:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=user-engagement-57975b9b8d-6lnvh, path=/., pod_template_hash=57975b9b8d" t=2024-05-29T13:44:15.394781431Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=user-engagement, group=blox, humio=logs, instance=10.41.18.5:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=user-engagement-57975b9b8d-tj88g, path=/., pod_template_hash=57975b9b8d" t=2024-05-29T13:44:15.394722114Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=user-admin, group=blox, humio=logs, instance=10.41.5.7:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=user-admin-74d4955b64-7hkl6, path=/., pod_template_hash=74d4955b64" t=2024-05-29T13:44:15.394611952Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.394318041Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.394340166Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.394312479Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=user-admin, group=blox, humio=logs, instance=10.41.18.4:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=user-admin-74d4955b64-sf4f7, path=/., pod_template_hash=74d4955b64" t=2024-05-29T13:44:15.394412305Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:15.394407528Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="FunctionName=beta-workmotion-tenant-lambda-authorizer" t=2024-05-29T13:44:15.394394362Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.394311746Z caller=remote_instance_store.go:51 user=151289 slug=everflow msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.394293676Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=transaction, group=blox, humio=logs, instance=10.41.16.9:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=transaction-68b6b596c4-z46t5, path=/., pod_template_hash=68b6b596c4" t=2024-05-29T13:44:15.394262354Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=transaction, group=blox, humio=logs, instance=10.41.11.6:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=transaction-68b6b596c4-rxvt5, path=/., pod_template_hash=68b6b596c4" t=2024-05-29T13:44:15.394184936Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.394073251Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=rest-api, group=blox, humio=logs, instance=10.41.24.11:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=rest-api-666d4df98b-jl9j6, path=/., pod_template_hash=666d4df98b" t=2024-05-29T13:44:15.394055441Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=rest-api, group=blox, humio=logs, instance=10.41.0.7:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=rest-api-666d4df98b-lgfgx, path=/., pod_template_hash=666d4df98b" t=2024-05-29T13:44:15.393699816Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=rest-api, group=blox, humio=logs, instance=10.41.0.6:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=rest-api-666d4df98b-l4256, path=/., pod_template_hash=666d4df98b" t=2024-05-29T13:44:15.393586498Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.39358657Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=400599 slug=unionai instance="cluster=production, region=us-east-2" t=2024-05-29T13:44:15.393347287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=DataPacket, environment=production, instance=149.18.24.129:9998, ip=149.18.24.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-washingtondc.crt, role=vpn, server=washington452, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.392676814Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.392613657Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=DataPacket, environment=production, instance=149.18.24.129:9998, ip=149.18.24.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington452, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.392396474Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Washington DC, country=United States, datacenter=DataPacket, environment=production, instance=149.18.24.129:9998, ip=149.18.24.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=washington452, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.392072997Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=438855 slug=teckresources instance= t=2024-05-29T13:44:15.392053495Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=438855 slug=teckresources t=2024-05-29T13:44:15.392006077Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=438855 slug=teckresources version=5 fingerprint=21567abbb5eaecdc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.391890577Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.391572175s EvaluationString:}]" duration=70.079458ms
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=quote, group=blox, humio=logs, instance=10.41.18.8:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=quote-57b4f7c6d9-7m4b2, path=/., pod_template_hash=57b4f7c6d9" t=2024-05-29T13:44:15.393265977Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Warsaw, country=Poland, datacenter=DataPacket, environment=production, instance=138.199.59.33:9998, ip=138.199.59.33, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=warsaw410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.39187694Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=quote, group=blox, humio=logs, instance=10.41.18.8:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=quote-57b4f7c6d9-7m4b2, path=/., pod_template_hash=57b4f7c6d9" t=2024-05-29T13:44:15.393243773Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Warsaw, country=Poland, datacenter=DataPacket, environment=production, instance=138.199.59.33:9998, ip=138.199.59.33, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/poland.crt, role=vpn, server=warsaw410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.391693656Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.393172699Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=400599 slug=unionai instance="cluster=production, region=eu-central-1" t=2024-05-29T13:44:15.393212387Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=quote, group=blox, humio=logs, instance=10.41.10.5:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=quote-57b4f7c6d9-6rzbb, path=/., pod_template_hash=57b4f7c6d9" t=2024-05-29T13:44:15.393105931Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=400599 slug=unionai instance="cluster=production-serverless, region=us-east-2" t=2024-05-29T13:44:15.393117807Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.39304459Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=400599 slug=unionai instance="cluster=canary, region=us-east-2" t=2024-05-29T13:44:15.393039422Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.393036728Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=698963 slug=lemonade version=3 fingerprint=9ed6f3778b0f233e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.392467376Z level=debug msg="Alert rule evaluated" results="[{Instance:app=pet-blender, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=pet-blender, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=pet-blender, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=pet-blender, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox Value:0xc01e75f318} THRESHOLD:{Var:THRESHOLD Labels:app=pet-blender, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=pet-blender, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox Value:0xc01e75f3d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.392062029s EvaluationString:[ var='QUERY' labels={app=pet-blender, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=pet-blender, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox} value=0 ], [ var='THRESHOLD' labels={app=pet-blender, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=pet-blender, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox} value=0 ]}]" duration=34.694295ms
+ level=debug ts=2024-05-29T13:44:15.392914681Z caller=remote_instance_store.go:51 user=442934 slug=arqit msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=order, group=blox, humio=logs, instance=10.41.18.3:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=order-f575b785-v9222, path=/., pod_template_hash=f575b785" t=2024-05-29T13:44:15.392882311Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=order, group=blox, humio=logs, instance=10.41.11.5:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=order-f575b785-6942p, path=/., pod_template_hash=f575b785" t=2024-05-29T13:44:15.392781566Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.392677856Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.392684414Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=order, group=blox, humio=logs, instance=10.41.10.4:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=order-f575b785-njs67, path=/., pod_template_hash=f575b785" t=2024-05-29T13:44:15.392686812Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.392709958Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.392610369Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:15.392660797Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=Ph3GqdY7z, ref_id=A" t=2024-05-29T13:44:15.39262383Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.392584166Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=market, group=blox, humio=logs, instance=10.41.18.10:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=market-5b5b68777c-vpfkh, path=/., pod_template_hash=5b5b68777c" t=2024-05-29T13:44:15.392599677Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.392570074Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=6498c309084ca3c3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.39248482Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=Ph3GqdY7z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.392191226s EvaluationString:}]" duration=149.035802ms
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=market, group=blox, humio=logs, instance=10.41.11.12:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=market-5b5b68777c-httm7, path=/., pod_template_hash=5b5b68777c" t=2024-05-29T13:44:15.392474385Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID681dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=344017 slug=descript version=3 fingerprint=8e7da06e8a94ba3e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.392428655Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.project_id=production-273614, resource.type=k8s_container State:Normal Error: Results:map[] Values:map[Reduce:{Var:Reduce Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc0311323e8} Threshold:{Var:Threshold Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc0311323d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.392016183s EvaluationString:[ var='Reduce' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0.171881640625 ], [ var='Threshold' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0 ]}]" duration=602.130631ms
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=ledger, group=blox, humio=logs, instance=10.41.5.10:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=ledger-6b59f46dcd-q5h9x, path=/., pod_template_hash=6b59f46dcd" t=2024-05-29T13:44:15.392282872Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=ledger, group=blox, humio=logs, instance=10.41.5.10:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=ledger-6b59f46dcd-q5h9x, path=/., pod_template_hash=6b59f46dcd" t=2024-05-29T13:44:15.39226579Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=ledger, group=blox, humio=logs, instance=10.41.4.11:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=ledger-6b59f46dcd-mjj27, path=/., pod_template_hash=6b59f46dcd" t=2024-05-29T13:44:15.392176541Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.392172275Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.391971376Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=ledger, group=blox, humio=logs, instance=10.41.2.10:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=ledger-6b59f46dcd-kqnll, path=/., pod_template_hash=6b59f46dcd" t=2024-05-29T13:44:15.392066935Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=ledger, group=blox, humio=logs, instance=10.41.2.10:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=ledger-6b59f46dcd-kqnll, path=/., pod_template_hash=6b59f46dcd" t=2024-05-29T13:44:15.392051173Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.392029601Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.391953905Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=ledger, group=blox, humio=logs, instance=10.41.16.11:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=ledger-6b59f46dcd-x4n2z, path=/., pod_template_hash=6b59f46dcd" t=2024-05-29T13:44:15.391940785Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.391271275Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Warsaw, country=Poland, datacenter=DataPacket, environment=production, instance=138.199.59.193:9998, ip=138.199.59.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=warsaw413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.391102659Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Warsaw, country=Poland, datacenter=DataPacket, environment=production, instance=138.199.59.193:9998, ip=138.199.59.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/poland.crt, role=vpn, server=warsaw413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.39092131Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=earn, group=blox, humio=logs, instance=10.41.23.12:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=earn-5b8458d7cd-mllpn, path=/., pod_template_hash=5b8458d7cd" t=2024-05-29T13:44:15.391521711Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Virginia Beach, country=United States, datacenter=DataPacket, environment=production, instance=84.239.10.129:9998, ip=84.239.10.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=virginia403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.390604949Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Virginia Beach, country=United States, datacenter=DataPacket, environment=production, instance=84.239.10.129:9998, ip=84.239.10.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=virginia403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.390594094Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vilnius, country=Lithuania, datacenter=OneProvider, environment=production, instance=194.32.122.75:9998, ip=194.32.122.75, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vilnius404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.390451507Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vilnius, country=Lithuania, datacenter=OneProvider, environment=production, instance=194.32.122.60:9998, ip=194.32.122.60, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vilnius403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.390083628Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=earn, group=blox, humio=logs, instance=10.41.14.8:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=earn-5b8458d7cd-gmqr5, path=/., pod_template_hash=5b8458d7cd" t=2024-05-29T13:44:15.391418828Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vilnius, country=Lithuania, datacenter=OneProvider, environment=production, instance=194.32.122.45:9998, ip=194.32.122.45, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vilnius401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.389757442Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.389575119Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vilnius, country=Lithuania, datacenter=OneProvider, environment=production, instance=194.32.122.30:9998, ip=194.32.122.30, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vilnius402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.389438876Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vilnius, country=Lithuania, datacenter=OneProvider, environment=production, instance=194.32.122.30:9998, ip=194.32.122.30, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lt.crt, role=vpn, server=vilnius402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.389234379Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.389194078Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=89.187.168.1:9998, ip=89.187.168.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=vienna403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.389034862Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=89.187.168.1:9998, ip=89.187.168.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/austria.crt, role=streaming-optimized, server=vienna403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.388823252Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.388765814Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.39113197Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=crypto-deposit, group=blox, humio=logs, instance=10.41.5.11:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=crypto-deposit-b486fd475-hfdcg, path=/., pod_template_hash=b486fd475" t=2024-05-29T13:44:15.391119119Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.39105651Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=crypto-deposit, group=blox, humio=logs, instance=10.41.5.11:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=crypto-deposit-b486fd475-hfdcg, path=/., pod_template_hash=b486fd475" t=2024-05-29T13:44:15.391102827Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=compliance, group=blox, humio=logs, instance=10.41.16.5:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=compliance-69bf479ff-g56wf, path=/., pod_template_hash=69bf479ff" t=2024-05-29T13:44:15.391033141Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=compliance, group=blox, humio=logs, instance=10.41.0.4:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=compliance-69bf479ff-nwnmk, path=/., pod_template_hash=69bf479ff" t=2024-05-29T13:44:15.390968055Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.390850898Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.390863552Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.390795469Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.390804469Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.390734799Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.914551ms
+ level=debug ts=2024-05-29T13:44:15.377760689Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=axonserver-mz, appgroup=axonserver, apps_kubernetes_io_pod_index=2, axonserver=axonserver-mz-2, controller_revision_hash=axonserver-mz-788db6fbfd, group=blox, humio=infra, instance=10.41.8.7:8024, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=axonserver-mz-2, path=/events, statefulset_kubernetes_io_pod_name=axonserver-mz-2" t=2024-05-29T13:44:15.390730939Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=156.146.60.1:9998, ip=156.146.60.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=vienna402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.388585497Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=156.146.60.1:9998, ip=156.146.60.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/austria.crt, role=streaming-optimized, server=vienna402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.388247517Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=156.146.60.1:9998, ip=156.146.60.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/austria.crt, role=streaming-optimized, server=vienna402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.388232685Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.390662412Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.388017447Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=156.146.60.129:9998, ip=156.146.60.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/austria.crt, role=streaming-optimized, server=vienna401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.38784334Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.390609531Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.146.204:9998, ip=66.115.146.204, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.387475904Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.387430059Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.146.202:9998, ip=66.115.146.202, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.387278606Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.146.202:9998, ip=66.115.146.202, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.387036855Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=208.78.42.222:9998, ip=208.78.42.222, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.386861518Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.386822091Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=208.78.42.222:9998, ip=208.78.42.222, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.386696317Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=axonserver-mz, appgroup=axonserver, apps_kubernetes_io_pod_index=1, axonserver=axonserver-mz-1, controller_revision_hash=axonserver-mz-788db6fbfd, group=blox, humio=infra, instance=10.41.20.5:8024, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=axonserver-mz-1, path=/events, statefulset_kubernetes_io_pod_name=axonserver-mz-1" t=2024-05-29T13:44:15.390576153Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.386666799Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=axonserver-mz, appgroup=axonserver, apps_kubernetes_io_pod_index=1, axonserver=axonserver-mz-1, controller_revision_hash=axonserver-mz-788db6fbfd, group=blox, humio=infra, instance=10.41.20.5:8024, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=axonserver-mz-1, path=/controldata, statefulset_kubernetes_io_pod_name=axonserver-mz-1" t=2024-05-29T13:44:15.390515043Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=208.78.42.159:9998, ip=208.78.42.159, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.38652139Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=208.78.42.159:9998, ip=208.78.42.159, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.386353284Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.386324497Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=230713 slug=flocksafety instance="datasource_uid=grafanacloud-prom, ref_id=D" t=2024-05-29T13:44:15.390426657Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=axonserver-mz, appgroup=axonserver, apps_kubernetes_io_pod_index=1, axonserver=axonserver-mz-1, controller_revision_hash=axonserver-mz-788db6fbfd, group=blox, humio=infra, instance=10.41.20.5:8024, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=axonserver-mz-1, path=/controldata, statefulset_kubernetes_io_pod_name=axonserver-mz-1" t=2024-05-29T13:44:15.390503547Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.89.7:9998, ip=172.98.89.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.385979975Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.385937813Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.89.194:9998, ip=172.98.89.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.385780923Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=axonserver-mz, appgroup=axonserver, apps_kubernetes_io_pod_index=0, axonserver=axonserver-mz-0, controller_revision_hash=axonserver-mz-788db6fbfd, group=blox, humio=infra, instance=10.41.12.5:8024, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=axonserver-mz-0, path=/events, statefulset_kubernetes_io_pod_name=axonserver-mz-0" t=2024-05-29T13:44:15.390441128Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=162.216.47.2:9998, ip=162.216.47.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.385139507Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=162.216.47.194:9998, ip=162.216.47.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.384974043Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.38493506Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=162.216.47.194:9998, ip=162.216.47.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.384799783Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=162.216.47.194:9998, ip=162.216.47.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.384791044Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=162.216.47.130:9998, ip=162.216.47.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.384662222Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=axonserver-mz, appgroup=axonserver, apps_kubernetes_io_pod_index=0, axonserver=axonserver-mz-0, controller_revision_hash=axonserver-mz-788db6fbfd, group=blox, humio=infra, instance=10.41.12.5:8024, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=axonserver-mz-0, path=/controldata, statefulset_kubernetes_io_pod_name=axonserver-mz-0" t=2024-05-29T13:44:15.390354962Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.384443308Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=TSS / Performive, environment=production, instance=107.181.189.210:9998, ip=107.181.189.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.38410594Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=api-gw, group=blox, humio=logs, instance=10.41.6.7:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=api-gw-7db567fff8-cdwjk, path=/., pod_template_hash=7db567fff8" t=2024-05-29T13:44:15.39029062Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.383966462Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=89.149.52.6:9998, ip=89.149.52.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.383860282Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.383841275Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=89.149.52.5:9998, ip=89.149.52.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.383742875Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.383285515Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=126872 slug=sensoper instance="datasource_uid=z6Ms0MGMk, ref_id=A" t=2024-05-29T13:44:15.379122488Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=126872 slug=sensoper t=2024-05-29T13:44:15.379105726Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=89.149.52.4:9998, ip=89.149.52.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.379076058Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=126872 slug=sensoper version=1 fingerprint=c9059e83544926ae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.379033844Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=z6Ms0MGMk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.378635929s EvaluationString:}]" duration=226.185165ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.379039192Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=89.149.52.3:9998, ip=89.149.52.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.378858462Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=api-gw, group=blox, humio=logs, instance=10.41.6.6:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=api-gw-7db567fff8-qdjgq, path=/., pod_template_hash=7db567fff8" t=2024-05-29T13:44:15.39012897Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=89.149.52.3:9998, ip=89.149.52.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.378659256Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.41.202.9:9998, ip=181.41.202.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver435, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.378054776Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager
user=325783 slug=bloxprod instance="app=api-gw, group=blox, humio=logs, instance=10.41.11.13:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=api-gw-7db567fff8-9mxzd, path=/., pod_template_hash=7db567fff8" t=2024-05-29T13:44:15.390041903Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=api-gw, group=blox, humio=logs, instance=10.41.11.13:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=api-gw-7db567fff8-9mxzd, path=/., pod_template_hash=7db567fff8" t=2024-05-29T13:44:15.390024538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.377817731Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.389914808Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.41.202.6:9998, ip=181.41.202.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.376963617Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.41.202.5:9998, ip=181.41.202.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.37654309Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.389881644Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=api-gw, group=blox, humio=logs, instance=10.41.1.7:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=api-gw-7db567fff8-k6ptt, path=/., pod_template_hash=7db567fff8" t=2024-05-29T13:44:15.389919044Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.389859527Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.7:9998, ip=181.214.153.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.376137346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.376108693Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.7:9998, ip=181.214.153.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.375957903Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=api-gw, group=blox, humio=logs, instance=10.41.1.6:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=api-gw-7db567fff8-9zwfp, path=/., pod_template_hash=7db567fff8" t=2024-05-29T13:44:15.389766652Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=api-gw, group=blox, humio=logs, instance=10.41.0.8:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=api-gw-7db567fff8-fhxz4, path=/., pod_template_hash=7db567fff8" t=2024-05-29T13:44:15.389680856Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="app=adyen-api, group=blox, humio=logs, instance=10.41.16.3:8080, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=adyen-api-9c566d758-vkhhd, path=/., pod_template_hash=9c566d758" t=2024-05-29T13:44:15.389482947Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=325783 slug=bloxprod t=2024-05-29T13:44:15.389069821Z level=debug msg="State manager processing evaluation results" resultCount=57 + level=debug ts=2024-05-29T13:44:15.388748698Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.388351163Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.388353118Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.387344437Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.38727369Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.387197028Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov t=2024-05-29T13:44:15.386951816Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.386718882Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.386689271Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.386667039Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.38647864Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiName=beta-workmotion-tenant-api, Method=--, Resource=/employees/{proxy+}, 
Stage=--" t=2024-05-29T13:44:15.386364456Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=316418 slug=workmotion version=2 fingerprint=28d028a6028dd94c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.38622114Z level=debug msg="Alert rule evaluated" results="[{Instance:ApiName=beta-workmotion-tenant-api, Method=--, Resource=/employees/{proxy+}, Stage=-- State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:ApiName=beta-workmotion-tenant-api, Method=--, Resource=/employees/{proxy+}, Stage=-- Value:} C:{Var:C Labels:ApiName=beta-workmotion-tenant-api, Method=--, Resource=/employees/{proxy+}, Stage=-- Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.385787619s EvaluationString:[ var='B' labels={ApiName=beta-workmotion-tenant-api, Method=--, Resource=/employees/{proxy+}, Stage=--} value=null ], [ var='C' labels={ApiName=beta-workmotion-tenant-api, Method=--, Resource=/employees/{proxy+}, Stage=--} value=null ]}]" duration=30.163662ms + level=debug ts=2024-05-29T13:44:15.385959263Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.385847196Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.385768286Z caller=remote_alert_sender.go:94 user=920650 slug=pradeep19 host=pradeep19-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.16.178.51:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adlwi5gllab5sa alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.6:9998, ip=181.214.153.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.375595846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.6:9998, ip=181.214.153.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.375584578Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.5:9998, ip=181.214.153.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.375400553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.5:9998, ip=181.214.153.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, 
server=vancouver423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.375383934Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=196413 slug=form3production instance="FunctionName=sax-certificate-reminder-signing-keys" t=2024-05-29T13:44:15.385652106Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196413 slug=form3production instance="FunctionName=sax-certificate-reminder-signing-keys" t=2024-05-29T13:44:15.385634644Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID331dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.385528869Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=301.46453ms + logger=ngalert.scheduler user=196413 slug=form3production version=1 fingerprint=65351de146375fd7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.385336699Z level=debug msg="Alert rule evaluated" results="[{Instance:FunctionName=sax-certificate-reminder-signing-keys State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:FunctionName=sax-certificate-reminder-signing-keys Value:0xc0a753db58} C:{Var:C Labels:FunctionName=sax-certificate-reminder-signing-keys Value:0xc0a753db80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.384695146s EvaluationString:[ var='B' labels={FunctionName=sax-certificate-reminder-signing-keys} value=1 ], [ var='C' labels={FunctionName=sax-certificate-reminder-signing-keys} value=0 ]}]" duration=169.987787ms + level=debug ts=2024-05-29T13:44:15.385041454Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.384887392Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.38482951Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.384801459Z caller=remote_instance_store.go:51 user=256527 slug=thebaconmonster msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.384667106Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=256527 slug=thebaconmonster t=2024-05-29T13:44:15.38475272Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.384693795Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=245291 slug=pismo version=19 fingerprint=254d427ee275d2bf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.384396915Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.381105438s EvaluationString:}]" duration=2.023359668s + level=debug ts=2024-05-29T13:44:15.384686605Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.38457028Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.384464271Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=256527 
slug=thebaconmonster t=2024-05-29T13:44:15.384543283Z level=debug msg="State manager processing evaluation results" resultCount=3 + logger=ngalert.scheduler user=256527 slug=thebaconmonster version=3 fingerprint=2fec38585f73c889 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.384418578Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=temp, app=pheme, area=bin, device=mush1, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=temp, app=pheme, area=bin, device=mush1, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35ef10} B:{Var:B Labels:__name__=temp, app=pheme, area=bin, device=mush1, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35ef80} C:{Var:C Labels:__name__=temp, app=pheme, area=bin, device=mush1, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35eea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.383887971s EvaluationString:[ var='A' labels={__name__=temp, app=pheme, area=bin, device=mush1, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=73.22000122070312 ], [ var='B' labels={__name__=temp, app=pheme, area=bin, device=mush1, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=73.22000122070312 ], [ var='C' labels={__name__=temp, app=pheme, area=bin, device=mush1, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=0 ]} {Instance:__name__=temp, app=pheme, area=bin, device=mush2, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=temp, app=pheme, area=bin, device=mush2, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35f178} B:{Var:B Labels:__name__=temp, app=pheme, area=bin, device=mush2, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35f080} C:{Var:C Labels:__name__=temp, app=pheme, area=bin, device=mush2, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35f100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.383911943s EvaluationString:[ var='A' labels={__name__=temp, app=pheme, area=bin, device=mush2, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=64.58000183105469 ], [ var='B' labels={__name__=temp, app=pheme, area=bin, device=mush2, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=64.58000183105469 ], [ var='C' labels={__name__=temp, app=pheme, area=bin, device=mush2, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=1 ]} {Instance:__name__=temp, app=pheme, area=bin, device=mush3, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=temp, app=pheme, area=bin, device=mush3, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35f270} B:{Var:B Labels:__name__=temp, app=pheme, area=bin, device=mush3, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35f2f0} C:{Var:C Labels:__name__=temp, app=pheme, area=bin, device=mush3, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom Value:0xc01b35f360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.38392302s 
EvaluationString:[ var='A' labels={__name__=temp, app=pheme, area=bin, device=mush3, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=67.63999938964844 ], [ var='B' labels={__name__=temp, app=pheme, area=bin, device=mush3, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=67.63999938964844 ], [ var='C' labels={__name__=temp, app=pheme, area=bin, device=mush3, dt=govee, instance=localhost:8666, job=govee-monitor, location=mushroom} value=1 ]}]" duration=15.677041ms + level=debug ts=2024-05-29T13:44:15.384405786Z caller=remote_instance_store.go:51 user=497177 slug=zoldmezo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.384338901Z caller=remote_instance_store.go:51 user=843304 slug=ppcgroup msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.384264526Z caller=remote_instance_store.go:51 user=726011 slug=oneqrew msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.384252715Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=843304 slug=ppcgroup t=2024-05-29T13:44:15.384291401Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=843304 slug=ppcgroup instance= t=2024-05-29T13:44:15.3842559Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=920650 slug=pradeep19 t=2024-05-29T13:44:15.38420201Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.704113ms + level=debug ts=2024-05-29T13:44:15.383850615Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.383936625Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.383881036Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.383841215Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.383551494Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.383474928Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.383409729Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.383396054Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.383001152Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=698963 slug=lemonade version=1 fingerprint=3b9ecb22dbb288d4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.382311497Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=QUERY State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.381106889s EvaluationString:}]" duration=66.772385ms + level=debug ts=2024-05-29T13:44:15.382334055Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.382276554Z 
caller=remote_instance_store.go:51 user=743579 slug=neotax msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.38212429Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.38210042Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:15.382091573Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.356498ms + level=debug ts=2024-05-29T13:44:15.382050107Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=onlinepriznani-tmp-2, instance=onlinepriznani-tmp-2, job=integrations/agent" t=2024-05-29T13:44:15.382089609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=onlinepriznani-metabase.1, instance=onlinepriznani-metabase.1, job=integrations/agent" t=2024-05-29T13:44:15.381904135Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.381772177Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=onlinepriznani-2, instance=onlinepriznani-2, job=integrations/agent" t=2024-05-29T13:44:15.381761801Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.381691596Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=onlinepriznani-1, instance=onlinepriznani-1, job=integrations/agent" t=2024-05-29T13:44:15.381686219Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.381685309Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-web-devel-1, instance=bezsanonu-web-devel-1, job=integrations/agent" t=2024-05-29T13:44:15.381638588Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-web-1, instance=bezsanonu-web-1, job=integrations/agent" t=2024-05-29T13:44:15.381563636Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.381552834Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-sk-app-devel-1, instance=bezsanonu-sk-app-devel-1, job=integrations/agent" t=2024-05-29T13:44:15.381355251Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-sk-app-devel-1, instance=bezsanonu-sk-app-devel-1, job=integrations/agent" t=2024-05-29T13:44:15.381346091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-sk-app-1, instance=bezsanonu-sk-app-1, job=integrations/agent" t=2024-05-29T13:44:15.38130613Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-firma-devel-1, instance=bezsanonu-firma-devel-1, job=integrations/agent" t=2024-05-29T13:44:15.381258609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-firma-devel-1, instance=bezsanonu-firma-devel-1, job=integrations/agent" t=2024-05-29T13:44:15.381249759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-dane-devel-1, instance=bezsanonu-dane-devel-1, job=integrations/agent" t=2024-05-29T13:44:15.381203078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-dane-1, instance=bezsanonu-dane-1, job=integrations/agent" t=2024-05-29T13:44:15.381165777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-dane-1, instance=bezsanonu-dane-1, job=integrations/agent" t=2024-05-29T13:44:15.381157377Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics" t=2024-05-29T13:44:15.381095572Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-cz-metabase-1, instance=bezsanonu-cz-metabase-1, job=integrations/agent" t=2024-05-29T13:44:15.381119516Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics" t=2024-05-29T13:44:15.381085895Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.381065459Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=743579 slug=neotax instance="agent_hostname=bezsanonu-app-1, instance=bezsanonu-app-1, job=integrations/agent" t=2024-05-29T13:44:15.381057364Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.380967711Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:15.380944671Z level=debug msg="State manager processing evaluation results" resultCount=3 + logger=ngalert.scheduler user=538037 slug=drivewealth version=89 fingerprint=9a896d0d74d9eaf8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.380729304Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-01, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-01, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac2370} B:{Var:B Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-01, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac2430} C:{Var:C Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-01, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac24f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.380012798s EvaluationString:[ var='A' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-01, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=12 ], [ var='B' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-01, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=12 ], [ var='C' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-01, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=0 ]} {Instance:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac2728} B:{Var:B Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac2818} C:{Var:C Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac2668}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.380037237s EvaluationString:[ var='A' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=11 ], [ var='B' 
labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=11 ], [ var='C' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=0 ]} {Instance:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac29b0} B:{Var:B Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac2a68} C:{Var:C Labels:__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics Value:0xc036ac2b58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.380051825s EvaluationString:[ var='A' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=11 ], [ var='B' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=11 ], [ var='C' labels={__name__=ioms2p_metrics_application_latency_microseconds, application=ioms2p_metrics, business=institutional, component=InstitutionalEngine, data_type=application, db=telegraf, host=ny4ap-intel-03, location=NY4-PQT-8162, quantile=0.5, url=http://127.0.0.1:8201/metrics} value=0 ]}]" duration=23.390742ms + logger=ngalert.state.manager user=743579 slug=neotax t=2024-05-29T13:44:15.38088859Z level=debug msg="State manager processing evaluation results" resultCount=22 + level=debug ts=2024-05-29T13:44:15.380600628Z caller=remote_instance_store.go:51 user=60199 slug=wallapop msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:15.380557492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=60199 slug=wallapop t=2024-05-29T13:44:15.380508697Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.380180362Z caller=remote_instance_store.go:51 user=778383 
slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.379812177Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query" t=2024-05-29T13:44:15.37980616Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.379772079Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.379568019Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:15.379452061Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.238544ms + level=debug ts=2024-05-29T13:44:15.378908487Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.378845592Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.378566855Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.378402833Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.37841795Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.378354627Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.378314634Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.378308584Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.37821675Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.378219108Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=142072 slug=w2z instance= t=2024-05-29T13:44:15.378197151Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.378023212Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.378033112Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.377996116Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.377848316Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.37783262Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.377753955Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.377761437Z caller=remote_instance_store.go:51 user=824036 slug=mellifera msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=824036 slug=mellifera 
instance="__name__=namada_validator_uptime_percentage, chain_id=shielded-expedition.88f17d1d14, instance=localhost:13123, job=namada-mellifera, validator_hash_address=3BA4CBA3D0DE25094DF41C8B1A07A8BD163B045E, validator_tm_address=tnam1qxrc2njv8lnvfqsmj5dlyzuys06nzmz3ggx2s3dc" t=2024-05-29T13:44:15.377612664Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.377050162Z caller=remote_instance_store.go:51 user=726011 slug=oneqrew msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.376898594Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.376679927Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.376561117Z caller=remote_instance_store.go:51 user=920650 slug=pradeep19 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=920650 slug=pradeep19 instance="site_id=LRTL, sys_name=NVR" t=2024-05-29T13:44:15.376479057Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=920650 slug=pradeep19 t=2024-05-29T13:44:15.376394006Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.376171943Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=5 fingerprint=1e69423c43ece6cd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.376049868Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.714295ms + level=debug ts=2024-05-29T13:44:15.376101009Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=error ts=2024-05-29T13:44:15.375992006Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + level=info ts=2024-05-29T13:44:15.375957857Z caller=grafana.go:247 user=625173 slug=hadron msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=ddmhidierw7b4b&panel_id=2" groups=1 alerts=0 + level=debug ts=2024-05-29T13:44:15.37583478Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.375840154Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PP-CLOSED-ACCOUNT-STATUS-SQS" t=2024-05-29T13:44:15.375755688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PP-CLOSED-ACCOUNT-STATUS-SQS" t=2024-05-29T13:44:15.375741363Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.375347663Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=739130 slug=redphasetech t=2024-05-29T13:44:15.375525778Z level=debug msg="Saving alert states done" count=21 max_state_save_concurrency=1 duration=316.022492ms + level=debug ts=2024-05-29T13:44:15.375442731Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.375540448Z caller=grafana.go:247 user=625173 slug=hadron msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=ddmhidierw7b4b&panel_id=2" groups=0 alerts=0 + level=debug ts=2024-05-29T13:44:15.375361029Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.375172105Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.375120368Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.375122116Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.5:9998, ip=181.214.153.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.375157621Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.375100286Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=524410 slug=syso t=2024-05-29T13:44:15.375069118Z level=debug msg="Saving alert states done" count=14 max_state_save_concurrency=1 duration=408.319799ms + level=debug ts=2024-05-29T13:44:15.375081362Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:15.375014572Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.4:9998, ip=181.214.153.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=vancouver422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.374966088Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.37482592Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=150145 slug=pleasant version=1 fingerprint=a1bfa72fbb6136e5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.37469662Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.374444361s EvaluationString:}]" duration=18.24376ms + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.4:9998, ip=181.214.153.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.37456719Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.374482837Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.374308092Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vancouver, country=Canada, datacenter=PIA, environment=production, instance=181.214.153.3:9998, ip=181.214.153.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-vancouver.crt, role=vpn, server=vancouver421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.373974229Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.37389687Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.373667432Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Virginia Beach, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.373421806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vilnius, country=Lithuania, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.373261947Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.373094622Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.372921105Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vancouver, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.37282658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vancouver, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.372817018Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Valletta, country=Malta, datacenter=M247, environment=production, instance=176.125.230.16:9998, ip=176.125.230.16, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/malta.crt, role=vpn, server=malta404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.372790369Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.372763582Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.37265301Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.372712562Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.372550094Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.372428489Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vaduz, country=Liechtenstein, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.372330986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vaduz, country=Liechtenstein, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.37231982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=530405 slug=zetetic t=2024-05-29T13:44:15.372386496Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=530405 slug=zetetic instance="chain=Kusama, pool=Zetetic" t=2024-05-29T13:44:15.372368146Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:15.372156691Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.721811ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Ulaanbaatar, country=Mongolia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.372159555Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Ulaanbaatar, country=Mongolia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.372147773Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.371973572Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Valencia, country=Spain, datacenter=PacketExchange, environment=production, instance=196.245.54.145:9998, ip=196.245.54.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/es-valencia.crt, role=streaming-optimized, server=valencia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.371947681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Valencia, country=Spain, datacenter=PacketExchange, environment=production, instance=196.245.54.145:9998, ip=196.245.54.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/es-valencia.crt, role=streaming-optimized, server=valencia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.371933911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Trenton, country=United States, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.371890243Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.371866431Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.371766717Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Valencia, country=Spain, datacenter=PacketExchange, environment=production, instance=196.245.54.130:9998, ip=196.245.54.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=valencia401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.371680592Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.371669853Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.371581358Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.371475479Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.912178ms + level=debug ts=2024-05-29T13:44:15.371456873Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.371463268Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tirana, country=Albania, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.371355914Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.371278839Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:15.37123655Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.371172736Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.371091268Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.371007968Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tbilisi, country=Georgia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.371143512Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.370974313Z caller=remote_instance_store.go:51 user=481110 slug=g123 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.371000363Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.370989076Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.370962079Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Vaduz, country=Liechtenstein, datacenter=M247, environment=production, instance=91.90.122.20:9998, ip=91.90.122.20, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=liechtenstein401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.370902089Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.370757164Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Sydney, country=Australia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.370696675Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.370681107Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.370616318Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.370479516Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ulaanbaatar, country=Mongolia, datacenter=DataPacket, environment=production, instance=192.142.227.13:9998, ip=192.142.227.13, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mongolia405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.370361383Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ulaanbaatar, country=Mongolia, datacenter=DataPacket, environment=production, instance=192.142.227.13:9998, ip=192.142.227.13, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mongolia405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.370344798Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.370199262Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.37009931Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.370054757Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.36997525Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.9:9998, ip=204.93.149.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.36996844Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.369969183Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.9:9998, ip=204.93.149.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.369958233Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.369926575Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.369814077Z caller=remote_instance_store.go:51 user=708873 slug=soultv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708873 slug=soultv instance="InstanceId=i-0c81b2805a5c6edc0" t=2024-05-29T13:44:15.369737236Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.369681155Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.369630362Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Singapore, country=Singapore, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.369687783Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.369618892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.369539985Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.8:9998, ip=204.93.149.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey439, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.369398928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.8:9998, ip=204.93.149.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey439, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.369382787Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.36914762Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager.persist user=158691 slug=covalent t=2024-05-29T13:44:15.369077597Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.27887ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Seattle, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.36901258Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.369022873Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.7:9998, ip=204.93.149.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.369000295Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.7:9998, ip=204.93.149.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.36899143Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=116479 slug=tomtomnv instance= t=2024-05-29T13:44:15.368947449Z level=warn msg="Failed to take an image" dashboard=Men4MVEGz panel=14 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:15.368812625Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=448378 slug=abdelhak0salhi t=2024-05-29T13:44:15.368759573Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.434958ms + level=debug ts=2024-05-29T13:44:15.368773168Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.368815425Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.7:9998, ip=204.93.149.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.368774351Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.7:9998, ip=204.93.149.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.368637068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=328453 slug=jitolabs t=2024-05-29T13:44:15.368468777Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.029917ms + logger=ngalert.state.manager user=61907 slug=fullstory instance= t=2024-05-29T13:44:15.368655029Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=61907 slug=fullstory t=2024-05-29T13:44:15.36842726Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.6:9998, ip=204.93.149.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.368419021Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.368368751Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.368238253Z caller=remote_image_capturer.go:54 user=116479 slug=tomtomnv rule_org_id=1 rule_uid=8mhcI4Lnk dashboard=Men4MVEGz panel=14 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.368280536Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:15.368188455Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:15.368162071Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.6:9998, ip=204.93.149.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.368222293Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=116479 slug=tomtomnv instance= t=2024-05-29T13:44:15.368150695Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:15.368109807Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.36813454Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.368059188Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.367986581Z caller=remote_instance_store.go:51 user=497177 slug=zoldmezo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.367943546Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Salt Lake City, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.367969357Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:15.367963992Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.367834466Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:15.367895656Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=173730 slug=nikon version=79 fingerprint=79989697ea21466a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.367847983Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.367622739s EvaluationString:}]" duration=346.203656ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.5:9998, ip=204.93.149.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.367829812Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.367756048Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=523906 slug=cyberark instance="ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster" t=2024-05-29T13:44:15.367701595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=523906 slug=cyberark instance="ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster" t=2024-05-29T13:44:15.367686012Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Riyadh, country=Saudi Arabia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.367696878Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.36766673Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.scheduler user=523906 slug=cyberark version=181 fingerprint=e19a2fb00efb4480 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.367523235Z level=debug msg="Alert rule evaluated" results="[{Instance:ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster State:Normal Error: Results:map[] Values:map[Cluster Memory Utilization:{Var:Cluster Memory Utilization Labels:ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster Value:0xc031689de0} Morethan:{Var:Morethan Labels:ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster Value:0xc031689e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.367210604s EvaluationString:[ var='Cluster Memory Utilization' labels={ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster} value=2.8076171875 ], [ var='Morethan' labels={ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster} value=0 ]}]" duration=465.734014ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.367570523Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.367389742Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=204.93.149.4:9998, ip=204.93.149.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey435, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.367431036Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.367328539Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.367129692Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.367083066Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.367127404Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=438855 slug=teckresources instance="datasource_uid=000000096, ref_id=A" t=2024-05-29T13:44:15.366971155Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=438855 slug=teckresources instance="datasource_uid=000000096, ref_id=A" t=2024-05-29T13:44:15.366963325Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.366941181Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Providence, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.366814088Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.366783145Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.366621293Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.9:9998, ip=102.165.16.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.366636407Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.9:9998, ip=102.165.16.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.366628176Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.366591817Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.366490398Z caller=remote_instance_store.go:51 user=261837 slug=empowercloud msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.366560208Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.366606503Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=633335 slug=promqlworkshop t=2024-05-29T13:44:15.366542394Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Portland - Oregon, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.366502206Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.9:9998, ip=102.165.16.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.366498183Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Portland - Maine, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.366380681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Portland - Maine, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.366368365Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.366343329Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.366216512Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.366202976Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.36611794Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.366033354Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.36604852Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.365948827Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.36590083Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.36590358Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.365843822Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Phoenix, country=United States, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.365817205Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.365619842Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.365616704Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.6:9998, ip=102.165.16.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.36550692Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.6:9998, ip=102.165.16.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.365473201Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Philadelphia, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.365475977Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.365383693Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.365450148Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.365351952Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.364986725Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.5:9998, ip=102.165.16.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.36504332Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.36497014Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.364862735Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.36462133Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.364578674Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.364478607Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.1198ms + level=debug ts=2024-05-29T13:44:15.364473643Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=456850 slug=juniz t=2024-05-29T13:44:15.364350034Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.364314426Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=456850 slug=juniz instance= t=2024-05-29T13:44:15.36432467Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=456850 slug=juniz instance= t=2024-05-29T13:44:15.364299506Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.364279387Z caller=remote_instance_store.go:51 user=856040 slug=kuady msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.3:9998, ip=102.165.16.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.364217852Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:15.364207956Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=456850 slug=juniz version=38 fingerprint=047fd871ffa519c2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.364055003Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[F:{Var:F 
Labels: Value:} G:{Var:G Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.363408523s EvaluationString:[ var='F' labels={} value=null ], [ var='G' labels={} value=null ]}]" duration=144.204845ms + logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:15.364188286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.364067839Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=856040 slug=kuady t=2024-05-29T13:44:15.364145685Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=312340 slug=lakefs instance= t=2024-05-29T13:44:15.364117582Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=856040 slug=kuady version=4 fingerprint=d0f2e79e43e0061e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.364036813Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Above75:{Var:Above75 Labels: Value:0xc06ad95ea8} Condition:{Var:Condition Labels: Value:0xc06ad95e90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.363670039s EvaluationString:[ var='Above75' labels={} value=0 ], [ var='Condition' labels={} value=0 ]}]" duration=330.301448ms + level=debug ts=2024-05-29T13:44:15.36398472Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.3:9998, ip=102.165.16.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.363903404Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Omaha, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.363825061Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.363733585Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.363753995Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=PIA, environment=production, instance=102.165.16.2:9998, ip=102.165.16.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=newjersey425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.363261942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.363314066Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.363117211Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.363010796Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=37.19.197.219:9998, ip=37.19.197.219, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.363010302Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=New York, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.362966149Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.362940821Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.362951293Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=New Orleans, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.362817786Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Nassau, country=Bahamas, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.362666159Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.362535629Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.362511686Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.362425028Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=37.19.197.189:9998, ip=37.19.197.189, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newjersey.crt, role=vpn, server=newjersey418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.362445156Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.362386045Z caller=remote_instance_store.go:51 user=436633 slug=swirldslabsproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=692721 slug=agrigateone instance="source_name=DATALASTIC, success=false" t=2024-05-29T13:44:15.36238945Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.362369298Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.362354433Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Mumbai, country=India, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.362312522Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.3622856Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.362281048Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=37.19.197.189:9998, ip=37.19.197.189, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newjersey418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.362266479Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=633345 slug=spaaza version=3 fingerprint=235d0934335468a0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.362101718Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bc5670cc-bd27-4b68-a9cd-bca27cbe8e73, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.361772489s EvaluationString:}]" duration=254.448148ms + logger=ngalert.state.manager user=421567 slug=nexx360 t=2024-05-29T13:44:15.362047831Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.362172956Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Montreal, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.362126785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.362198835Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:15.362132201Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.362064969Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.362089099Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.361932304Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Montevideo, country=Uruguay, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.361964623Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Montevideo, country=Uruguay, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.361953096Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.361923675Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.361868049Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.361846661Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.361783061Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=37.19.197.129:9998, ip=37.19.197.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-streaming.crt, role=streaming-optimized, server=newjersey419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.361736997Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:15.361711527Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=37.19.197.129:9998, ip=37.19.197.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-streaming.crt, role=streaming-optimized, server=newjersey419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.361722302Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=332555 slug=omexomcs t=2024-05-29T13:44:15.361611179Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.331582ms
+ level=debug ts=2024-05-29T13:44:15.361622375Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=37.19.197.129:9998, ip=37.19.197.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=newjersey419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.361584532Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Minneapolis, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.361519123Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=143.244.44.60:9998, ip=143.244.44.60, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-streaming.crt, role=streaming-optimized, server=newjersey401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.361467284Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=143.244.44.60:9998, ip=143.244.44.60, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-streaming.crt, role=streaming-optimized, server=newjersey401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.361457532Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.36137925Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Milwaukee, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.361354833Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Trenton, country=United States, datacenter=DataPacket, environment=production, instance=143.244.44.60:9998, ip=143.244.44.60, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=newjersey401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.361313379Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.361168699Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Milano, country=Italy, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.361202203Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.361116761Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.361120551Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.36099863Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.361084049Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Milano, country=Italy, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.360958513Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Milano, country=Italy, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.360928892Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.360871259Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:15.360813896Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Miami, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.360768686Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=splitgate1047-justice-prod" t=2024-05-29T13:44:15.36060551Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.94:9998, ip=191.96.36.94, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.360660796Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.360610676Z caller=remote_image_capturer.go:33 user=426229 slug=accelbyte rule_org_id=1 rule_uid=ffeb6347-1630-4a78-97cf-121009f7f77e msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:15.360512751Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=alwaysgeeky-justice-prod" t=2024-05-29T13:44:15.360447926Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:15.360457305Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.360441951Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.94:9998, ip=191.96.36.94, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.360454713Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.360178616Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.64:9998, ip=191.96.36.64, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.360137756Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=327842 slug=exabeam instance= t=2024-05-29T13:44:15.360004906Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.359812823Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.359805011Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.359772866Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.359559016Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:15.359402096Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:15.359385506Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.359380408Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.3:9998, ip=191.96.36.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.359403486Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.359352457Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.scheduler user=307381 slug=kambitaskforce version=42 fingerprint=7dc9a4b5ab37f90b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.359156219Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.358800388s EvaluationString:}]" duration=216.952059ms
+ level=debug ts=2024-05-29T13:44:15.359229705Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Luxembourg, country=Luxembourg, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.359301011Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Luxembourg, country=Luxembourg, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.359289724Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.33:9998, ip=191.96.36.33, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto415, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.35921245Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.359151133Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.359092647Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.359034Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.359020152Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.359039564Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.359034475Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.33:9998, ip=191.96.36.33, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto415, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.359015864Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.358971805Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.358905943Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.358957678Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.358833105Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:15.358849054Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.176936ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=London, country=United Kingdom, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.358827231Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=London, country=United Kingdom, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.358815561Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.358804671Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.215:9998, ip=191.96.36.215, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.358778193Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.358732373Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.358618814Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.358527572Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.215:9998, ip=191.96.36.215, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.358597604Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.215:9998, ip=191.96.36.215, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.358581156Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.358502543Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.3585124Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.35849044Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.358479349Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.185:9998, ip=191.96.36.185, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.35838433Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=327842 slug=exabeam instance= t=2024-05-29T13:44:15.358359221Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.358144415Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.185:9998, ip=191.96.36.185, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.358208889Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.358204026Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.35814424Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.358125307Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:15.357737065Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.16384ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.154:9998, ip=191.96.36.154, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.357936533Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.357835089Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=La Paz, country=Bolivia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.357829111Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:15.35755791Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:15.357544174Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=191.96.36.124:9998, ip=191.96.36.124, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.357434914Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Kathmandu, country=Nepal, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.357299806Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.357257863Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.357037971Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.94:9998, ip=179.61.197.94, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.356914536Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.356888559Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.356847027Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:15.356521072Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.356502685Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.356499375Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.64:9998, ip=179.61.197.64, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.356347999Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Jackson, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.356223624Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.356133763Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.3:9998, ip=179.61.197.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.356125923Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Istanbul, country=Turkey, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:15.355977639Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.3:9998, ip=179.61.197.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.355883699Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.355789612Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager.persist user=726011 slug=oneqrew t=2024-05-29T13:44:15.355466182Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.355583671Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.355504451Z caller=remote_instance_store.go:51 user=726011 slug=oneqrew msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=726011 slug=oneqrew instance="k8s_cluster_name=aks-tool-de-devops, k8s_namespace=gradle, k8s_statefulset_name=build-cache-node, k8s_statefulset_uid=2bbaa03c-f3cc-4520-b1c6-56e011c5b9d8, otel_receiver=k8s_cluster" t=2024-05-29T13:44:15.35541539Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=747518 slug=dvevoli instance="deployment_environment=testing, job=km-rechnungsentwurf-kalkconsumer-be, topic=shared_qs_default_km_kmonline_kalkulation" t=2024-05-29T13:44:15.355520985Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=726011 slug=oneqrew instance="k8s_cluster_name=aks-iam-de-dev, k8s_namespace=keycloak-iam, k8s_statefulset_name=keycloak-keycloakx, k8s_statefulset_uid=bca486b0-d9b0-439d-9472-b46f62b7605a, otel_receiver=k8s_cluster" t=2024-05-29T13:44:15.355359059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=726011 slug=oneqrew instance="k8s_cluster_name=aks-cloud-erp-de-stg, k8s_namespace=rabbitmq, k8s_statefulset_name=rabbitmq-cloud-erp-server, k8s_statefulset_uid=4ed13b97-3fe8-45d3-a9e0-16d29ed24fee, otel_receiver=k8s_cluster" t=2024-05-29T13:44:15.355344618Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=726011 slug=oneqrew instance="k8s_cluster_name=aks-cloud-erp-de-dev, k8s_namespace=rabbitmq, k8s_statefulset_name=rabbitmq-cloud-erp-server, k8s_statefulset_uid=5066fe00-08d9-4558-afbc-98c40a86fab2, otel_receiver=k8s_cluster" t=2024-05-29T13:44:15.355303628Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=747518 slug=dvevoli t=2024-05-29T13:44:15.355398422Z level=debug msg="State manager processing evaluation results" resultCount=11
+ level=debug ts=2024-05-29T13:44:15.355306356Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.33:9998, ip=179.61.197.33, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.355302193Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.355244013Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.355132731Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=306439 slug=caiprod instance="datasource_uid=DggGZ2cVz, ref_id=A" t=2024-05-29T13:44:15.354907729Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=306439 slug=caiprod instance="datasource_uid=DggGZ2cVz, ref_id=A" t=2024-05-29T13:44:15.35488638Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=306439 slug=caiprod version=23 fingerprint=a6124e3998f58f62 attempt=1 now=2024-05-29T13:44:00Z t=2024-05-29T13:44:15.354803484Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=DggGZ2cVz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:00 +0000 UTC EvaluationDuration:15.354423448s EvaluationString:}]" duration=9.144958252s
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.218:9998, ip=179.61.197.218, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.35487215Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.35482504Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.354743784Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.354745995Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.354687949Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.354626311Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.354469517Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.187:9998, ip=179.61.197.187, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.354410863Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.354236625Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:15.354227211Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=32.066544ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.354123174Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.35409321Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=PIA, environment=production, instance=179.61.197.156:9998, ip=179.61.197.156, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.353941486Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Douglas, country=Isle of Man, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.353824684Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.353772133Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=178.249.214.33:9998, ip=178.249.214.33, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.353728127Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.353677871Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=178.249.214.33:9998, ip=178.249.214.33, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.353538296Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Dhaka, country=Bangladesh, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.353493484Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=328453 slug=jitolabs t=2024-05-29T13:44:15.353436867Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=328453 slug=jitolabs instance="hostname=tokyo-mainnet-rpc-1, region=tokyo" t=2024-05-29T13:44:15.353421464Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=328453 slug=jitolabs instance="hostname=tokyo-mainnet-rpc-1, region=tokyo" t=2024-05-29T13:44:15.353388038Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.88.97.220:9998, ip=149.88.97.220, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.353380013Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.353231697Z caller=remote_instance_store.go:51 user=497177 slug=zoldmezo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Des Moines, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.353132704Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:15.353038036Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Denver, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.35296471Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.35285943Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:15.352848932Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.88.97.193:9998, ip=149.88.97.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.352854931Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=142180 slug=luxtronic instance= t=2024-05-29T13:44:15.352794009Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.352787961Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.518063ms
+ logger=ngalert.scheduler user=142180 slug=luxtronic version=2 fingerprint=d61c1bd284c2825e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.352728109Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.352439055s EvaluationString:}]" duration=128.425126ms
+ level=debug ts=2024-05-29T13:44:15.352723575Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:15.352538095Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.74.54:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddlo40mjkeqyoa alerts=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Dallas, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.352700428Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Dallas, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.35261258Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:15.352612467Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.51.155:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddlo40mjkeqyoa alerts=1
+ logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:15.352407699Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=491157 slug=prd01wr instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.352396164Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=491157 slug=prd01wr instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.352389082Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Copenhagen, country=Denmark, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.352417697Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.352415609Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.50.218.210:9998, ip=149.50.218.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.352315759Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.352275851Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.35227308Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.352268391Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.352161718Z caller=remote_instance_store.go:51 user=937416 slug=cambridgeuniversitypress msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.50.218.210:9998, ip=149.50.218.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.352099986Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:15.352019506Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.351977182Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:15.35186359Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Columbus, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.351876264Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.351847422Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance="datasource_uid=-UMZtQI7z, ref_id=A" t=2024-05-29T13:44:15.351830262Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.351691704Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ level=debug ts=2024-05-29T13:44:15.351574689Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Columbia, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.351583898Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.351355995Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.351321249Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.351310123Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Chisinau, country=Moldova, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.351312694Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.351161782Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Chicago, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.351098526Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.350833267Z caller=remote_instance_store.go:51 user=524410 slug=syso msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.50.218.129:9998, ip=149.50.218.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.350811188Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.36.49.169:9998, ip=149.36.49.169, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.350687401Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.36.49.169:9998, ip=149.36.49.169, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.35067832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.350592962Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:15.350571797Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.36.49.169:9998, ip=149.36.49.169, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-toronto.crt, role=vpn, server=toronto438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.350572416Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Toronto, country=Canada, datacenter=DataPacket, environment=production, instance=149.36.49.129:9998, ip=149.36.49.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=toronto401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.350441805Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.350247072Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.350275128Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Cairo, country=Egypt, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.350305844Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.350272811Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.350162217Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.scheduler user=448378 slug=abdelhak0salhi version=1 fingerprint=b971a62399101c24 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.349951073Z level=debug msg="Alert rule evaluated" results="[{Instance:id=1dcd313303dc5779, name=TP ModBus, retentionPolicy= State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:id=1dcd313303dc5779, name=TP ModBus, retentionPolicy= Value:0xc049d81af0} B:{Var:B Labels:id=1dcd313303dc5779, name=TP ModBus, retentionPolicy= Value:0xc049d81b70} C:{Var:C Labels:id=1dcd313303dc5779, name=TP ModBus, retentionPolicy= Value:0xc049d81be0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.349461663s EvaluationString:[ var='A' labels={id=1dcd313303dc5779, name=TP ModBus, retentionPolicy=} value=3.6e+12 ], [ var='B' labels={id=1dcd313303dc5779, name=TP ModBus, retentionPolicy=} value=3.6e+12 ], [ var='C' labels={id=1dcd313303dc5779, name=TP ModBus, retentionPolicy=} value=1 ]}]" duration=13.715804ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.350076008Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:15.349841281Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=158691 slug=covalent instance= t=2024-05-29T13:44:15.349774975Z level=warn msg="Failed to take an image" dashboard=prod-alerting panel=24 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=monolith-databus-worker, pod=monolith-databus-worker-bbdf8d7fb-9grsz" t=2024-05-29T13:44:15.349759241Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.349650214Z caller=remote_instance_store.go:51 user=637258 slug=testb9lab msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.349449498Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=472647 slug=planet instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.349391088Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.349385533Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.349286722Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=472647 slug=planet version=1 fingerprint=4c3a75e6c9146f77 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.349289656Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.348957069s EvaluationString:}]" duration=32.646406ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.349310834Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=129335 slug=neomantra instance= t=2024-05-29T13:44:15.349182278Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Bridgeport, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.349145318Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.349084401Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.349009951Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.349008843Z caller=remote_image_capturer.go:54 user=158691 slug=covalent rule_org_id=1 rule_uid=de67d370-4229-4e7c-baf5-13b9216736f6 dashboard=prod-alerting panel=24 msg="rendering alert image with grafana"
+ level=debug ts=2024-05-29T13:44:15.348953859Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:15.348965455Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.655173ms
+ level=debug ts=2024-05-29T13:44:15.348866873Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.348898966Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.348969191Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.348943501Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Boston, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.34881205Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Boston, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.348801319Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=158691 slug=covalent t=2024-05-29T13:44:15.34873604Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Boise, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.348651033Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.348431711Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.348410531Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.348392929Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=261837 slug=empowercloud t=2024-05-29T13:44:15.348321382Z level=debug msg="Deleting alert states" count=19
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.348332901Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=261837 slug=empowercloud t=2024-05-29T13:44:15.348294993Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"fOmq2sLnz\"],[\"__alert_rule_uid__\",\"bdfnnv18bg3cwc\"],[\"agent_hostname\",\"3899-wdp\"],[\"alertname\",\"SSL Invalid Alert\"],[\"grafana_folder\",\"empower\"]]" state=Normal reason=
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=3a906b99840594eb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.348260646Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.348022947s EvaluationString:}]" duration=395.148831ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.348301909Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=261837 slug=empowercloud t=2024-05-29T13:44:15.348260112Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"fOmq2sLnz\"],[\"__alert_rule_uid__\",\"bdfnnv18bg3cwc\"],[\"agent_hostname\",\"5033-VBK\"],[\"alertname\",\"SSL Invalid Alert\"],[\"grafana_folder\",\"empower\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=261837 slug=empowercloud t=2024-05-29T13:44:15.348240029Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"fOmq2sLnz\"],[\"__alert_rule_uid__\",\"bdfnnv18bg3cwc\"],[\"agent_hostname\",\"4985-silkroad\"],[\"alertname\",\"SSL Invalid Alert\"],[\"grafana_folder\",\"empower\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tokyo, country=Japan, datacenter=DataPacket, environment=production, instance=138.199.39.1:9998, ip=138.199.39.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/japan.crt, role=vpn, server=tokyo410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.348264733Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=261837 slug=empowercloud t=2024-05-29T13:44:15.348220316Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"fOmq2sLnz\"],[\"__alert_rule_uid__\",\"bdfnnv18bg3cwc\"],[\"agent_hostname\",\"4708-bvr\"],[\"alertname\",\"SSL Invalid Alert\"],[\"grafana_folder\",\"empower\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=261837 slug=empowercloud t=2024-05-29T13:44:15.348201345Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"fOmq2sLnz\"],[\"__alert_rule_uid__\",\"bdfnnv18bg3cwc\"],[\"agent_hostname\",\"5009-diva-e\"],[\"alertname\",\"SSL Invalid Alert\"],[\"grafana_folder\",\"empower\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=261837 slug=empowercloud t=2024-05-29T13:44:15.348153784Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"fOmq2sLnz\"],[\"__alert_rule_uid__\",\"bdfnnv18bg3cwc\"],[\"agent_hostname\",\"4833-Platinum\"],[\"alertname\",\"SSL Invalid Alert\"],[\"grafana_folder\",\"empower\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=261837 slug=empowercloud t=2024-05-29T13:44:15.348139916Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"fOmq2sLnz\"],[\"__alert_rule_uid__\",\"bdfnnv18bg3cwc\"],[\"agent_hostname\",\"4980-Klingele\"],[\"alertname\",\"SSL Invalid Alert\"],[\"grafana_folder\",\"empower\"]]" state=Normal reason=
+ level=debug ts=2024-05-29T13:44:15.348035289Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=578-RTL" t=2024-05-29T13:44:15.348029141Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.347999126Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.256021ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.347970083Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=855919 slug=mrdprod t=2024-05-29T13:44:15.347819404Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.116336ms
+ logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5069-Adtran" t=2024-05-29T13:44:15.347728307Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.347689884Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.347612245Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=umee1nna7k5lywn99cd63elcfqm6p8c5c4qcu6rqxdx, chain=umee-1, denom=uumee, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347615774Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5061-Bardusch" t=2024-05-29T13:44:15.347560858Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tirana, country=Albania, datacenter=OneProvider, environment=production, instance=31.171.154.130:9998, ip=31.171.154.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=tirana401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.347553702Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=stars1nna7k5lywn99cd63elcfqm6p8c5c4qcuuf2yz9, chain=stargaze-1, denom=ustars, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347549498Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tirana, country=Albania, datacenter=OneProvider, environment=production, instance=31.171.154.130:9998, ip=31.171.154.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=tirana401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.347521567Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5042-pentos" t=2024-05-29T13:44:15.347424567Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.347437019Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=noble1nna7k5lywn99cd63elcfqm6p8c5c4qcuqkg336, chain=noble-1, denom=uusdc, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347392721Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=noble1nna7k5lywn99cd63elcfqm6p8c5c4qcuqkg336, chain=noble-1, denom=uusdc, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347379917Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=noble1nna7k5lywn99cd63elcfqm6p8c5c4qcuqkg336, chain=noble-1, denom=uusdc, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347354012Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.347221795Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=neutron1nna7k5lywn99cd63elcfqm6p8c5c4qcuv25mnn, chain=neutron-1, denom=untrn, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347283718Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.347275631Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5038-exagon" t=2024-05-29T13:44:15.347236555Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=juno1p7d8mnjttcszv34pk2a5yyug3474mhfftz79v5, chain=juno-1, denom=ujuno, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347182403Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=juno1nna7k5lywn99cd63elcfqm6p8c5c4qcu787zwg, chain=juno-1, denom=ujuno, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.347155114Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.347125357Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5037-CoastalChe" t=2024-05-29T13:44:15.347151287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:15.347071083Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.18502ms
+ logger=ngalert.state.manager user=54972 slug=zanglang instance="account=celestia1nna7k5lywn99cd63elcfqm6p8c5c4qcuelvfne, chain=celestia, denom=utia, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes,
service_name=unknown_service" t=2024-05-29T13:44:15.347032777Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.347024586Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=info ts=2024-05-29T13:44:15.346953904Z caller=remote_image_capturer.go:61 user=54972 slug=zanglang rule_org_id=1 rule_uid=XlGHxvWVk dashboard=woHR_vQnk panel=10 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.347004394Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.346943419Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5028-HFMIXINGGR" t=2024-05-29T13:44:15.34698181Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.346825504Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.346850201Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.346732337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.346716719Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5012-Ingenics" t=2024-05-29T13:44:15.346724728Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.346627124Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.346690071Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5008-embraer" t=2024-05-29T13:44:15.346628401Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.34656662Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.snoke.integrationEvents" t=2024-05-29T13:44:15.346555742Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=5005-GreenGiraf" t=2024-05-29T13:44:15.346556121Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Andorra la Vella, country=Andorra, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.346552634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.346485453Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tbilisi, country=Georgia, datacenter=M247, environment=production, instance=95.181.236.2:9998, ip=95.181.236.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/georgia.crt, role=vpn, server=georgia403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.346462109Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=806229 slug=simplisafe version=33 fingerprint=e250a31b12830790 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.34637443Z level=debug msg="Alert rule evaluated" results="[{Instance:queue=two.snoke.integrationEvents State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.snoke.integrationEvents Value:0xc071003228} D:{Var:D Labels:queue=two.snoke.integrationEvents Value:0xc071003238}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.345648365s EvaluationString:[ var='B' labels={queue=two.snoke.integrationEvents} value=0 ], [ var='D' labels={queue=two.snoke.integrationEvents} value=0 ]}]" duration=69.684503ms + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4998-HackettGrp" t=2024-05-29T13:44:15.346480823Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=54972 slug=zanglang instance="account=axelar1nna7k5lywn99cd63elcfqm6p8c5c4qcuvmt3z4, chain=axelar-dojo-1, denom=uaxl, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:15.346160169Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z + level=debug ts=2024-05-29T13:44:15.346217793Z caller=remote_instance_store.go:51 user=220750 slug=homeys msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4988-loxxess" t=2024-05-29T13:44:15.346203785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, environment=production, role=vpn, 
service_name=zam" t=2024-05-29T13:44:15.346055442Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=victorialogs-data, fstype=zfs, instance=victorialogs-cax11-victorialogs, mountpoint=/victorialogs-data, nodename=victorialogs" t=2024-05-29T13:44:15.346167311Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.345784701Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=tmpfs, fstype=tmpfs, instance=victorialogs-cax11-victorialogs, mountpoint=/run/lock, nodename=victorialogs" t=2024-05-29T13:44:15.346020992Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tbilisi, country=Georgia, datacenter=M247, environment=production, instance=95.181.236.14:9998, ip=95.181.236.14, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=georgia404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.346053108Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.346028149Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=tmpfs, fstype=tmpfs, instance=victorialogs-cax11-victorialogs, mountpoint=/mnt/ephemeral/nomad-cluster/nomad/alloc/4a1ecf71-f303-1bb2-c0a7-c2c434e13aac/vmauth/secrets, nodename=victorialogs" t=2024-05-29T13:44:15.345876839Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=180994 slug=cgmonitor instance= t=2024-05-29T13:44:15.345819274Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=180994 slug=cgmonitor instance= t=2024-05-29T13:44:15.345811917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4975-broetje" t=2024-05-29T13:44:15.34578417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=180994 slug=cgmonitor version=1 fingerprint=a90626ebcf5f20fd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.345712952Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.345418085s EvaluationString:}]" duration=682.819657ms + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=tmpfs, fstype=tmpfs, instance=victorialogs-cax11-victorialogs, mountpoint=/mnt/ephemeral/nomad-cluster/nomad/alloc/4a1ecf71-f303-1bb2-c0a7-c2c434e13aac/vmauth/private, nodename=victorialogs" t=2024-05-29T13:44:15.345799463Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.345787165Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.345721862Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4953-itlogix" t=2024-05-29T13:44:15.345701413Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=tmpfs, fstype=tmpfs, instance=victorialogs-cax11-victorialogs, mountpoint=/mnt/ephemeral/nomad-cluster/nomad/alloc/4a1ecf71-f303-1bb2-c0a7-c2c434e13aac/victorialogs/private, nodename=victorialogs" t=2024-05-29T13:44:15.345643083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=tmpfs, fstype=tmpfs, instance=victorialogs-cax11-victorialogs, mountpoint=/mnt/ephemeral/nomad-cluster/nomad/alloc/4a1ecf71-f303-1bb2-c0a7-c2c434e13aac/connect-proxy-logsvmauth/secrets, nodename=victorialogs" t=2024-05-29T13:44:15.345575569Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.345537783Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.96:9998, ip=165.231.182.96, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=talinn404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.345598929Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4935-avencore" t=2024-05-29T13:44:15.345550886Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.345535727Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.345549859Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4935-avencore" t=2024-05-29T13:44:15.345539115Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Albuquerque, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.345504064Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.345476535Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4930-bust" t=2024-05-29T13:44:15.345466302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=/dev/sda15, fstype=vfat, instance=victorialogs-cax11-victorialogs, mountpoint=/boot/efi, nodename=victorialogs" t=2024-05-29T13:44:15.345360832Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=/dev/sda15, fstype=vfat, instance=victorialogs-cax11-victorialogs, mountpoint=/boot/efi, nodename=victorialogs" t=2024-05-29T13:44:15.345342902Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.96:9998, ip=165.231.182.96, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.345385626Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.96:9998, ip=165.231.182.96, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.345311118Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Adelaide, country=Australia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.345336396Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4913-Yuverta" t=2024-05-29T13:44:15.345317021Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=691855 slug=chainlake instance="device=/dev/sda1, fstype=ext4, instance=victorialogs-cax11-victorialogs, mountpoint=/, nodename=victorialogs" t=2024-05-29T13:44:15.34522515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.34527901Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=691855 slug=chainlake t=2024-05-29T13:44:15.345043637Z level=debug msg="State manager processing evaluation results" resultCount=12 + level=debug ts=2024-05-29T13:44:15.345094396Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Abuja, country=Nigeria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.345160974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4895-StiebelEl" t=2024-05-29T13:44:15.345157054Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.345116472Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.83:9998, ip=165.231.182.83, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=talinn403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.345095396Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4880-bam-uk" t=2024-05-29T13:44:15.345091494Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.344968865Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.83:9998, ip=165.231.182.83, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.344892847Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.344730299Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Zurich, country=Switzerland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.344826304Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=120621 slug=jdall instance= t=2024-05-29T13:44:15.344830535Z level=warn msg="Failed to take an image" dashboard=kCT3cakGk panel=19 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:15.344633954Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4844-tenthpin" t=2024-05-29T13:44:15.344598911Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Yerevan, country=Armenia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.344467893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.344451931Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.historian backend=loki user=190917 slug=d1cx t=2024-05-29T13:44:15.344304214Z level=debug msg="Done saving alert state history batch" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4825-CMB" t=2024-05-29T13:44:15.344312457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=storage-04" t=2024-05-29T13:44:15.344231104Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=shared-03" t=2024-05-29T13:44:15.344197378Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.344228254Z caller=remote_image_capturer.go:54 user=120621 slug=jdall rule_org_id=1 rule_uid=zevM7K_nk dashboard=kCT3cakGk panel=19 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=qualityeng-01" t=2024-05-29T13:44:15.344181025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=hobbes-07" t=2024-05-29T13:44:15.344140432Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.344110967Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=120621 slug=jdall instance= t=2024-05-29T13:44:15.344145935Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=120621 slug=jdall instance= t=2024-05-29T13:44:15.344140914Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4776-GKD" t=2024-05-29T13:44:15.344151266Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Warsaw, country=Poland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.344116907Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4776-GKD" t=2024-05-29T13:44:15.344143118Z level=debug msg="Setting next state" handler=resultNormal + level=error ts=2024-05-29T13:44:15.344071845Z caller=remote_rule_evaluator.go:110 user=120621 slug=jdall msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-12" t=2024-05-29T13:44:15.344097508Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=120621 slug=jdall version=1 fingerprint=27d4479a7215a9d7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.344094612Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.822064ms + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-11" t=2024-05-29T13:44:15.344070814Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4772-CDPQ" t=2024-05-29T13:44:15.344099298Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=261837 
slug=empowercloud instance="agent_hostname=4772-CDPQ" t=2024-05-29T13:44:15.344089496Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-03" t=2024-05-29T13:44:15.344004325Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance="cluster=analytics-01" t=2024-05-29T13:44:15.343973683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.343930292Z level=debug msg="State manager processing evaluation results" resultCount=12 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.55:9998, ip=165.231.182.55, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.343898904Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=472647 slug=planet version=4 fingerprint=394d00064fb65c9a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.343779869Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=analytics-01 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=analytics-01 Value:0xc02a4dc4a0} C:{Var:C Labels:cluster=analytics-01 Value:0xc02a4dc4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343274563s EvaluationString:[ var='B' labels={cluster=analytics-01} value=78 ], [ var='C' labels={cluster=analytics-01} value=0 ]} {Instance:cluster=compute-03 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=compute-03 Value:0xc02a4dc4e8} C:{Var:C Labels:cluster=compute-03 Value:0xc02a4dc508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343287982s EvaluationString:[ var='B' labels={cluster=compute-03} value=325 ], [ var='C' labels={cluster=compute-03} value=0 ]} {Instance:cluster=compute-10 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=compute-10 Value:0xc02a4dc548} C:{Var:C Labels:cluster=compute-10 Value:0xc02a4dc578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343294797s EvaluationString:[ var='B' labels={cluster=compute-10} value=46 ], [ var='C' labels={cluster=compute-10} value=0 ]} {Instance:cluster=compute-11 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=compute-11 Value:0xc02a4dc5b8} C:{Var:C Labels:cluster=compute-11 Value:0xc02a4dc5d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343300972s EvaluationString:[ var='B' labels={cluster=compute-11} value=46 ], [ var='C' labels={cluster=compute-11} value=0 ]} {Instance:cluster=compute-12 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=compute-12 Value:0xc02a4dc628} C:{Var:C Labels:cluster=compute-12 Value:0xc02a4dc648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343305641s EvaluationString:[ var='B' labels={cluster=compute-12} value=41 ], [ var='C' labels={cluster=compute-12} value=0 ]} {Instance:cluster=compute-15 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=compute-15 Value:0xc02a4dc698} C:{Var:C Labels:cluster=compute-15 Value:0xc02a4dc6b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343311787s EvaluationString:[ var='B' labels={cluster=compute-15} 
value=44 ], [ var='C' labels={cluster=compute-15} value=0 ]} {Instance:cluster=hobbes-07 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=hobbes-07 Value:0xc02a4dc700} C:{Var:C Labels:cluster=hobbes-07 Value:0xc02a4dc720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343316405s EvaluationString:[ var='B' labels={cluster=hobbes-07} value=106 ], [ var='C' labels={cluster=hobbes-07} value=0 ]} {Instance:cluster=pv-prod-01 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=pv-prod-01 Value:0xc02a4dc748} C:{Var:C Labels:cluster=pv-prod-01 Value:0xc02a4dc778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343321969s EvaluationString:[ var='B' labels={cluster=pv-prod-01} value=105 ], [ var='C' labels={cluster=pv-prod-01} value=0 ]} {Instance:cluster=qualityeng-01 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=qualityeng-01 Value:0xc02a4dc7b8} C:{Var:C Labels:cluster=qualityeng-01 Value:0xc02a4dc7d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343329003s EvaluationString:[ var='B' labels={cluster=qualityeng-01} value=103 ], [ var='C' labels={cluster=qualityeng-01} value=0 ]} {Instance:cluster=shared-03 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=shared-03 Value:0xc02a4dc930} C:{Var:C Labels:cluster=shared-03 Value:0xc02a4dc900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343333683s EvaluationString:[ var='B' labels={cluster=shared-03} value=442 ], [ var='C' labels={cluster=shared-03} value=0 ]} {Instance:cluster=sksinfra-01 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=sksinfra-01 Value:0xc02a4dcca0} C:{Var:C Labels:cluster=sksinfra-01 Value:0xc02a4dcc80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343338122s EvaluationString:[ var='B' labels={cluster=sksinfra-01} value=54 ], [ var='C' labels={cluster=sksinfra-01} value=0 ]} {Instance:cluster=storage-04 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=storage-04 Value:0xc02a4dccc8} C:{Var:C Labels:cluster=storage-04 Value:0xc02a4dcce8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.343343545s EvaluationString:[ var='B' labels={cluster=storage-04} value=166 ], [ var='C' labels={cluster=storage-04} value=0 ]}]" duration=238.133543ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.343896654Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.55:9998, ip=165.231.182.55, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.343890854Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.343856851Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4756-LeueundNil" t=2024-05-29T13:44:15.343861665Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.343809745Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4739-BioNTech" t=2024-05-29T13:44:15.343782389Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.41:9998, ip=165.231.182.41, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.343653838Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4678-BostonSci" t=2024-05-29T13:44:15.343645756Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4678-BostonSci" t=2024-05-29T13:44:15.343638093Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4607-itelio" t=2024-05-29T13:44:15.343587583Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.343477495Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.77477ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Vancouver, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.343539657Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.343514788Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4459-FCF" t=2024-05-29T13:44:15.343482307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4321-Heinze" t=2024-05-29T13:44:15.343430524Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.343324704Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4265-afas" t=2024-05-29T13:44:15.343264496Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=4197-Hirslanden" t=2024-05-29T13:44:15.343154167Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Valencia, country=Spain, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.343106125Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.28:9998, ip=165.231.182.28, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.34309157Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.342989719Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=3901-gft" t=2024-05-29T13:44:15.342894873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.15:9998, ip=165.231.182.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=talinn406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.342882357Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:15.342792746Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=3855-infront" t=2024-05-29T13:44:15.342840517Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.342848216Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Vaduz, country=Liechtenstein, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.342827263Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.342800736Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:15.342664373Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.15:9998, ip=165.231.182.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.342619834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.342583319Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=3792-UnionInves" t=2024-05-29T13:44:15.342667892Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance="easyflow_edition=alt-04" t=2024-05-29T13:44:15.342650194Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance="easyflow_edition=alt-04" t=2024-05-29T13:44:15.342640945Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=472647 slug=planet instance="easyflow_edition=alt-03" t=2024-05-29T13:44:15.342614655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.342554013Z level=debug msg="State manager processing evaluation results" resultCount=4 + logger=ngalert.state.manager.persist user=830813 slug=lynx0den t=2024-05-29T13:44:15.281448796Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.846512ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ulaanbaatar, country=Mongolia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.34252084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ulaanbaatar, country=Mongolia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.342511832Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.34237702Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.342397858Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.342350266Z 
caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.342301955Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-nrt,5)) Query" t=2024-05-29T13:44:15.342253716Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=3300-empExpress" t=2024-05-29T13:44:15.342268084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.122:9998, ip=165.231.182.122, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.342193441Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.342157353Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=2722-InTech" t=2024-05-29T13:44:15.342109624Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=d688826c9b9748a3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.342063097Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-nrt,5)) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc01b4c5fb8} Threshold:{Var:Threshold Labels: Value:0xc01b4c5fe0} compare:{Var:compare Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-nrt,5)) Query Value:0xc01920a010} sum:{Var:sum Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-nrt,5)) Query Value:0xc01920a038}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.341717886s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-nrt,5)) Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-nrt,5)) Query} value=0 ]}]" duration=33.724974ms + logger=ngalert.state.manager user=554491 slug=safeskyindustries instance="cluster=westeurope, namespace=production" t=2024-05-29T13:44:15.342025653Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=554491 slug=safeskyindustries 
instance="cluster=canadacentral, namespace=production" t=2024-05-29T13:44:15.341960952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=554491 slug=safeskyindustries t=2024-05-29T13:44:15.341908052Z level=debug msg="State manager processing evaluation results" resultCount=4 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.109:9998, ip=165.231.182.109, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.341871167Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=165.231.182.109:9998, ip=165.231.182.109, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ee.crt, role=vpn, server=talinn409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.34185753Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=1543-PictureGmb" t=2024-05-29T13:44:15.34175642Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Tirana, country=Albania, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.341719757Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=261837 slug=empowercloud instance="agent_hostname=1532-Akquise" t=2024-05-29T13:44:15.341646942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.341477872Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Tallinn, country=Estonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.341412124Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Taipei, country=Taiwan, datacenter=GSL, environment=production, instance=173.244.49.74:9998, ip=173.244.49.74, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=taiwan406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.341336299Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.341276349Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.341157409Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Taipei, country=Taiwan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.341227553Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Taipei, country=Taiwan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.341196792Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.341115427Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Taipei, country=Taiwan, datacenter=GSL, environment=production, instance=173.244.49.50:9998, ip=173.244.49.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/taiwan.crt, role=vpn, server=taiwan405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.341021204Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.340980749Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:15.340784985Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.149933ms
+level=debug ts=2024-05-29T13:44:15.3407397Z caller=remote_instance_store.go:51 user=497177 slug=zoldmezo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Taipei, country=Taiwan, datacenter=GSL, environment=production, instance=173.244.49.50:9998, ip=173.244.49.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=taiwan405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.340752748Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.340681966Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.340612131Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Strasbourg, country=France, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.34065902Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.340453403Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=ServersAustralia, environment=production, instance=117.120.9.34:9998, ip=117.120.9.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=sydney421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.340514366Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.340439683Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.340338008Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=ServersAustralia, environment=production, instance=117.120.9.34:9998, ip=117.120.9.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-sydney.crt, role=vpn, server=sydney421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.340300332Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.339779056Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.340016537Z caller=remote_instance_store.go:51 user=417450 slug=legitsecurity msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Sofia, country=Bulgaria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.34006275Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.339721584Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.33975746Z caller=remote_instance_store.go:51 user=855919 slug=mrdprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Skopje, country=Macedonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.339797147Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.33970835Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.339538935Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.339388852Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.339116852Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.339279792Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.339196506Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.339040681Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:15.339046252Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=56.7059ms
+level=debug ts=2024-05-29T13:44:15.339040005Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.338932834Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=848777 slug=opsalert instance="__name__=probe_http_status_code, config_version=1715334681575528448, instance=https://sxfmtaasstg.dryice.ai/, job=SX-MTaaS Staging XSMF, probe=Frankfurt" t=2024-05-29T13:44:15.339045116Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.338866404Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.338675048Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=191.101.210.8:9998, ip=191.101.210.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=sydney428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.338753229Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.338715458Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=245291 slug=pismo instance="QueueName=StatementsFilling-dead-letter" t=2024-05-29T13:44:15.338700093Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=646202 slug=kairosaerospace t=2024-05-29T13:44:15.338518996Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=74.471822ms
+level=debug ts=2024-05-29T13:44:15.338162895Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.338067229Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.337847867Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+level=debug ts=2024-05-29T13:44:15.337767827Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.337775038Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager.persist user=701741 slug=thetradingpitproduction t=2024-05-29T13:44:15.337724061Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.111076ms
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.337753936Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.337718528Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Shenzhen, country=China, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.337704906Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.337532168Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Seoul, country=Republic of Korea, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.337530631Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=191.101.210.6:9998, ip=191.101.210.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-sydney.crt, role=vpn, server=sydney426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.337481205Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=191.101.210.6:9998, ip=191.101.210.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-sydney.crt, role=vpn, server=sydney426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.337467234Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.337406241Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-veridas-db, env=au" t=2024-05-29T13:44:15.337354191Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=191.101.210.5:9998, ip=191.101.210.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=sydney425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.337252792Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.337223521Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Sao Paulo, country=Brazil, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.337109353Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.337154328Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.336977062Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-v4monitoring-db, env=au" t=2024-05-29T13:44:15.336959611Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.336902515Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-v4monitoring-db, env=au" t=2024-05-29T13:44:15.336815284Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.336864103Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.336782429Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.336609186Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.336477292Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.336350092Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.336354849Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:15.336308599Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:15.336296847Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.336309811Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:15.336246298Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager.persist user=550657 slug=garrigues t=2024-05-29T13:44:15.335649639Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=550657 slug=garrigues instance= t=2024-05-29T13:44:15.335626213Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:15.33601565Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=191.101.210.4:9998, ip=191.101.210.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-sydney.crt, role=vpn, server=sydney424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.33597991Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Riyadh, country=Saudi Arabia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.335964221Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.335944544Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-risk-defense-platform-db, env=au" t=2024-05-29T13:44:15.335754892Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.335621868Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.33551338Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.scheduler user=550657 slug=garrigues version=22 fingerprint=dfa16875f3b82fbd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.335201316Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.33482353s EvaluationString:}]" duration=75.467503ms
+level=debug ts=2024-05-29T13:44:15.335176764Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.33515443Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.335164008Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=304032 slug=clearbanc t=2024-05-29T13:44:15.335079742Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:15.335100485Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.335064788Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Reykjavik, country=Iceland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.33508855Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.335019159Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.33479121Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.334741928Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=DAMMAN Query" t=2024-05-29T13:44:15.33472692Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=154.16.81.7:9998, ip=154.16.81.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=sydney434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.334751454Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-instant-id-qa-db, env=au" t=2024-05-29T13:44:15.334626848Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.334571144Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=5fe1ce91a253f10a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.334508695Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=DAMMAN Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0440df4d8} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc0440df3d8} Threshold:{Var:Threshold Labels: Value:0xc0440df440} compare:{Var:compare Labels:aggregatedBy=sum, name=DAMMAN Query Value:0xc0440df480} sum:{Var:sum Labels:aggregatedBy=sum, name=DAMMAN Query Value:0xc0440df4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.334319733s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=100 ], [ var='Threshold' labels={} value=-20 ], [ var='compare' labels={aggregatedBy=sum, name=DAMMAN Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=DAMMAN Query} value=0 ]}]" duration=68.246488ms
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-idverse-enterprise-db, env=au" t=2024-05-29T13:44:15.33423009Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.334219347Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.334110104Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-frontend-log-db, env=au" t=2024-05-29T13:44:15.333949458Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.333629772Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.333449648Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.333372268Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.333189758Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.333011133Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.332831003Z caller=remote_instance_store.go:51 user=355252 slug=bumper msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-frontend-db, env=au" t=2024-05-29T13:44:15.332808094Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.332838672Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+level=debug ts=2024-05-29T13:44:15.332702816Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.332688058Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.332629285Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.332695318Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-BACS-INBOUND-DEDUPLICATION-DLQ" t=2024-05-29T13:44:15.33263287Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.332544976Z caller=remote_instance_store.go:51 user=637258 slug=testb9lab msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-BACS-INBOUND-DEDUPLICATION-DLQ" t=2024-05-29T13:44:15.332621709Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-dow-jones-db, env=au" t=2024-05-29T13:44:15.332610868Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.332519894Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=320906 slug=techcyte t=2024-05-29T13:44:15.332466456Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.519824ms
+level=debug ts=2024-05-29T13:44:15.332500116Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.332461366Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=453308 slug=hyperzodprod t=2024-05-29T13:44:15.332403176Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.332314988Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.332318631Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=453308 slug=hyperzodprod t=2024-05-29T13:44:15.332253698Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.332275093Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+level=debug ts=2024-05-29T13:44:15.332055351Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.331106766Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=770817 slug=exproment t=2024-05-29T13:44:15.331990558Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-creditsafe-db, env=au" t=2024-05-29T13:44:15.331813264Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.331750165Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nairobi, country=Kenya, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.331725535Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=696798 slug=mcv instance= t=2024-05-29T13:44:15.331656554Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.331592014Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.331487444Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(apex.Middle_East.players.x1.mh448980.serverstats) Query" t=2024-05-29T13:44:15.331436662Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Mumbai, country=India, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.331473251Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Moscow, country=Russian Federation, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.331247359Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.330781948Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=169633 slug=talentree t=2024-05-29T13:44:15.330990347Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=169633 slug=talentree instance= t=2024-05-29T13:44:15.330969966Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Montreal, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.330990956Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.330866811Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=777670 slug=fakku instance= t=2024-05-29T13:44:15.330908442Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=777670 slug=fakku t=2024-05-29T13:44:15.33084927Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=154.16.81.7:9998, ip=154.16.81.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-sydney.crt, role=vpn, server=sydney434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.330857914Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.330716022Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330653235Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.33065854Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.330633796Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.871131ms
+logger=ngalert.state.manager.persist user=239286 slug=om2test t=2024-05-29T13:44:15.330584851Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.205246ms
+level=debug ts=2024-05-29T13:44:15.330585011Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330558368Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330498218Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330534157Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-backend-db-read-replica-1, env=au" t=2024-05-29T13:44:15.330502671Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.330476477Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330431972Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330395105Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330354792Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330234978Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.330108195Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-backend-db, env=au" t=2024-05-29T13:44:15.330138576Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:15.330053223Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=806229 slug=simplisafe instance= previous_handler=resultError t=2024-05-29T13:44:15.330036514Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=806229 slug=simplisafe instance= previous_handler=resultError t=2024-05-29T13:44:15.330029183Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:15.330016404Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.329965043Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.329890437Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=154.16.81.6:9998, ip=154.16.81.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-sydney.crt, role=vpn, server=sydney433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.329900633Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.329733423Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.329681807Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=701741 slug=thetradingpitproduction t=2024-05-29T13:44:15.329607315Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.329613846Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.329572854Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.32960617Z caller=remote_instance_store.go:51 user=328778 slug=teemuskog msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.329510706Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=701741 slug=thetradingpitproduction version=42 fingerprint=f06057fc971c2971 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.32940408Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.328888908s EvaluationString:}]" duration=19.809077ms
+level=debug ts=2024-05-29T13:44:15.329454618Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.329411357Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=328778 slug=teemuskog instance= t=2024-05-29T13:44:15.329398071Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:15.329358103Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=328778 slug=teemuskog instance= t=2024-05-29T13:44:15.329288408Z level=debug msg="Setting next state" handler=resultError
+level=debug ts=2024-05-29T13:44:15.329037061Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.328927728Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.328812684Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.328886246Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.328749565Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.328797158Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:15.328724458Z caller=remote_alert_sender.go:94 user=254872 slug=ennowallet host=ennowallet-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.20.162:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=gmruU3O7k alerts=1
+level=debug ts=2024-05-29T13:44:15.328741442Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.328692898Z caller=remote_instance_store.go:51 user=190917 slug=d1cx msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.328615486Z level=debug msg="Changing state" previous_state=NoData next_state=Pending previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z
+logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.328597843Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-veridas-db, env=apac" t=2024-05-29T13:44:15.328653804Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=190917 slug=d1cx t=2024-05-29T13:44:15.328564923Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=307381 slug=kambitaskforce version=17 fingerprint=3f7880dfd7bc6aac attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.326128407Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.325441185s EvaluationString:}]" duration=31.506065ms
+logger=ngalert.scheduler user=254659 slug=elzeard t=2024-05-29T13:44:15.322545597Z level=debug msg="Skip rule evaluation because it is paused"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-v4monitoring-db, env=apac" t=2024-05-29T13:44:15.328394058Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-v4monitoring-db, env=apac" t=2024-05-29T13:44:15.328346246Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.328232305Z caller=remote_instance_store.go:51 user=497177 slug=zoldmezo msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:15.328226091Z caller=remote_alert_sender.go:94 user=277462 slug=bayadic host=bayadic-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.195.192:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f33d72c8-2a97-4e2c-a25a-a5a2541570e5 alerts=1
+level=info ts=2024-05-29T13:44:15.328194791Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.83.87:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e74daf71-2514-4db6-8617-e79bcf0a2403 alerts=1
+logger=ngalert.state.manager user=497177 slug=zoldmezo instance="__proxy_source__=influx, building=F" t=2024-05-29T13:44:15.32815327Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.328072804Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=497177 slug=zoldmezo instance="__proxy_source__=influx, building=E" t=2024-05-29T13:44:15.32812027Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.328074717Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=497177 slug=zoldmezo instance="__proxy_source__=influx, building=C" t=2024-05-29T13:44:15.328077503Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=497177 slug=zoldmezo instance="__proxy_source__=influx, building=B" t=2024-05-29T13:44:15.328048452Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-sar-investigation-db, env=apac" t=2024-05-29T13:44:15.32791718Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=497177 slug=zoldmezo t=2024-05-29T13:44:15.32795039Z level=debug msg="State manager processing evaluation results" resultCount=7
+level=debug ts=2024-05-29T13:44:15.32780565Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=154.16.81.5:9998, ip=154.16.81.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=sydney432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.327790298Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.327517583Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.32746838Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.327522628Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.327373928Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=154.16.81.5:9998, ip=154.16.81.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-sydney.crt, role=vpn, server=sydney432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.327320449Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.327098205Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.326735799Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=qyzvhg-7z, ref_id=A" t=2024-05-29T13:44:15.326687119Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Sydney, country=Australia, datacenter=PIA, environment=production, instance=154.16.81.3:9998, ip=154.16.81.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=sydney430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.326759988Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:15.326615249Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:15.326603102Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:15.32655591Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.326527268Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=eec5c4f8d80c83ec attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.326464891Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.32620085s EvaluationString:}]" duration=17.204301ms
+logger=ngalert.state.manager.persist user=235691 slug=om2 t=2024-05-29T13:44:15.326334653Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.326264998Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=235691 slug=om2 t=2024-05-29T13:44:15.326280934Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=228733 slug=csmoney version=59 fingerprint=983d598680a791d1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.326004808Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.325361383s EvaluationString:}]" duration=59.819451ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.325900463Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:15.325713486Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.07259ms
+level=debug ts=2024-05-29T13:44:15.325642048Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=808724 slug=harinipandian2002 t=2024-05-29T13:44:15.325735904Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=808724 slug=harinipandian2002 instance= t=2024-05-29T13:44:15.325682964Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:15.325644538Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=808724 slug=harinipandian2002 t=2024-05-29T13:44:15.325614213Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.325426325Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=691102 slug=deluxeconfdev t=2024-05-29T13:44:15.325405164Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=808724 slug=harinipandian2002 version=9 fingerprint=6d6b14024164d253 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.325509791Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc01af17d28} C:{Var:C Labels: Value:0xc01af17d30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.324700968s EvaluationString:[ var='B' labels={} value=246.82732435405933 ], [ var='C' labels={} value=1 ]}]" duration=238.981292ms
+logger=ngalert.state.manager.persist user=822158 slug=perseverance t=2024-05-29T13:44:15.325164188Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.261713ms
+level=debug ts=2024-05-29T13:44:15.324864285Z caller=remote_instance_store.go:51 user=637258 slug=testb9lab msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.71:9998, ip=46.246.3.71, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=stockholm403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.324804516Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Monaco, country=Monaco, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.324583874Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.324533523Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.324543829Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Minsk, country=Belarus, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.324411226Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.324388221Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+logger=ngalert.state.manager.persist user=916149 slug=cmfollpd t=2024-05-29T13:44:15.32432271Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.553709ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Milano, country=Italy, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.32426599Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.324045253Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+logger=ngalert.state.manager user=112387 slug=lucidhq instance="aggregatedBy=sum, name=supply-survey-api A" t=2024-05-29T13:44:15.323859206Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Mexico City, country=Mexico, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.32389382Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Mexico City, country=Mexico, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.323885016Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.323748064Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Marseille, country=France, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.323695581Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.323468841Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.323488071Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+level=debug ts=2024-05-29T13:44:15.323151864Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Manchester, country=United Kingdom, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.323370109Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:15.323069773Z caller=remote_alert_sender.go:94 user=112387 slug=lucidhq host=lucidhq-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.80.252:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a0b65e73-6370-4648-aa67-a95eac915617 alerts=1
+level=debug ts=2024-05-29T13:44:15.322829307Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.32258788Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:15.309675055Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.566369ms
+logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:15.318892733Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.060374ms
+level=debug ts=2024-05-29T13:44:15.322488769Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.322193875Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.4:9998, ip=46.246.3.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden.crt, role=vpn, server=stockholm401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.322292296Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.321976812Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-risk-defense-platform-db, env=apac" t=2024-05-29T13:44:15.322091075Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.322022629Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:15.321945676Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:15.321927631Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.321971296Z caller=remote_image_capturer.go:33 user=183214 slug=vectorizedio rule_org_id=1 rule_uid=ddiofh9s54mioa msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+level=debug ts=2024-05-29T13:44:15.321859101Z caller=remote_instance_store.go:51 user=220750 slug=homeys msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.32185757Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.482099ms
+logger=ngalert.scheduler user=109452 slug=deltarisk version=14 fingerprint=04c8531f9d859251 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.321615951Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.321065009s EvaluationString:}]" duration=54.21558ms
+level=debug ts=2024-05-29T13:44:15.321831517Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.321705601Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=183214 slug=vectorizedio version=1 fingerprint=28e15431d3e6232d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.321667284Z level=debug msg="Alert rule evaluated" results="[{Instance:redpanda_id=cpbdeif37uvmolr3j6t0 State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:redpanda_id=cpbdeif37uvmolr3j6t0 Value:0xc05255e6f8} B:{Var:B Labels:redpanda_id=cpbdeif37uvmolr3j6t0 Value:0xc05255e720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.320118002s EvaluationString:[ var='A' labels={redpanda_id=cpbdeif37uvmolr3j6t0} value=1 ], [ var='B' labels={redpanda_id=cpbdeif37uvmolr3j6t0} value=1 ]} {Instance:redpanda_id=cpbf1gq74ocl436p67ag State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:redpanda_id=cpbf1gq74ocl436p67ag Value:0xc05255e750} B:{Var:B Labels:redpanda_id=cpbf1gq74ocl436p67ag Value:0xc05255e758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.32012927s EvaluationString:[ var='A' labels={redpanda_id=cpbf1gq74ocl436p67ag} value=1 ], [ var='B' labels={redpanda_id=cpbf1gq74ocl436p67ag} value=1 ]}]" duration=44.733348ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.321521402Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.321350585Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.320815716Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.37:9998, ip=46.246.3.37, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden.crt, role=vpn, server=stockholm402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.320724152Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.320666212Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.32066899Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.320661543Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.320654635Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-periodic-reviews-db, env=apac" t=2024-05-29T13:44:15.320405214Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.37:9998, ip=46.246.3.37, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=stockholm402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.320267418Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.320070835Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.319779475Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-instant-id-qa-db, env=apac" t=2024-05-29T13:44:15.31984051Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.319840526Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.220:9998, ip=46.246.3.220, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden-2.crt, role=streaming-optimized, server=stockholm406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.319814717Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.319756459Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.319704883Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.319529275Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.31950023Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=8e8cc9ff091cb7c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.31946845Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.319260444s EvaluationString:}]" duration=449.869527ms
+logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:15.319329265Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=561712 slug=latamairlinespci t=2024-05-29T13:44:15.319299535Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.046045ms
+logger=ngalert.state.manager user=472647 slug=planet instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.319316377Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.319301504Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.319237676Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.319164344Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=472647 slug=planet version=362 fingerprint=e2cd82720a23c8eb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.319162918Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.318924481s EvaluationString:}]" duration=66.510368ms
+logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:15.319122342Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.31907477Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-instant-id-qa-db, env=apac" t=2024-05-29T13:44:15.318649923Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.318569594Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.318560965Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.318545777Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.318451134Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.318426786Z caller=remote_instance_store.go:51 user=126872 slug=sensoper msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.318369402Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=126872 slug=sensoper instance="datasource_uid=eVOZVCGGz, ref_id=A" t=2024-05-29T13:44:15.318361168Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=126872 slug=sensoper instance="datasource_uid=eVOZVCGGz, ref_id=A" t=2024-05-29T13:44:15.318318208Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=126872 slug=sensoper t=2024-05-29T13:44:15.31825765Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.318256774Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.318156983Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.317977795Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.317890195Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=310637 slug=notino t=2024-05-29T13:44:15.317716581Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=310637 slug=notino instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.317700011Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=310637 slug=notino t=2024-05-29T13:44:15.317656037Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-frontend-db-read-replica-1, env=apac" t=2024-05-29T13:44:15.31746919Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.316801305Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.316760066Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=LwbzRMnVk, ref_id=A" t=2024-05-29T13:44:15.31672218Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.316412704Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.316418412Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.316394801Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.316045918Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA,
city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.180:9998, ip=46.246.3.180, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden.crt, role=vpn, server=stockholm405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.316267058Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.316112513Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.180:9998, ip=46.246.3.180, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden.crt, role=vpn, server=stockholm405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.316201384Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.316153584Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=A" t=2024-05-29T13:44:15.316120878Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.315956267Z caller=remote_instance_store.go:51 user=822158 slug=perseverance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.315908025Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.315841334Z caller=remote_alert_sender.go:94 user=129403 slug=sitechsahc host=sitechsahc-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.86.91:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=j6jakGY7z alerts=1 + logger=ngalert.state.manager user=822158 slug=perseverance instance= t=2024-05-29T13:44:15.315884365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=822158 slug=perseverance instance= t=2024-05-29T13:44:15.315856834Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.31579074Z caller=remote_instance_store.go:51 user=916149 slug=cmfollpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.180:9998, ip=46.246.3.180, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=stockholm405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.315859052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=916149 slug=cmfollpd instance="instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal" t=2024-05-29T13:44:15.315726979Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.315773033Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=916149 slug=cmfollpd version=1 fingerprint=e1109f7fc82beb02 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.315492107Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal Value:0xc007e17d18} B:{Var:B Labels:instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal Value:0xc007e17d48} C:{Var:C Labels:instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal Value:0xc007e17d58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.314960997s EvaluationString:[ var='A' labels={instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal} value=18.150330539295 ], [ var='B' labels={instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal} value=18.150330539295 ], [ var='C' labels={instance=puusea4bfollwxsapp1002.foll.gcp.hclsw.internal} value=0 ]}]" duration=18.828701ms + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.31559685Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.315541847Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=46.246.3.106:9998, ip=46.246.3.106, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden.crt, role=vpn, server=stockholm404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.315574576Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=430961 slug=solifi version=2 fingerprint=e1349640001c58c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.315422959Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.315106837s EvaluationString:}]" duration=131.083379ms + level=debug ts=2024-05-29T13:44:15.315460146Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.315469484Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.767294ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=tempdb, dbinstance_identifier=sing-prod-frontend-db, env=apac" t=2024-05-29T13:44:15.315316021Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.315302771Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.315128817Z caller=remote_instance_store.go:51 user=355252 slug=bumper msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.315037878Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.315075307Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.145.122:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=4416sRPVk alerts=1 + level=debug ts=2024-05-29T13:44:15.31498573Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.314991344Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=351895 slug=abacusworks t=2024-05-29T13:44:15.31494145Z level=debug msg="Saving alert states" count=25 max_state_save_concurrency=1 + logger=ngalert.state.manager user=320906 slug=techcyte instance="DBClusterIdentifier=ci-vetcyte-grundium-cluster" t=2024-05-29T13:44:15.314932635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=320906 slug=techcyte t=2024-05-29T13:44:15.314881466Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-backend-db, env=apac" t=2024-05-29T13:44:15.313566947Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=212.112.19.18:9998, ip=212.112.19.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden.crt, role=vpn, server=stockholm407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.314891435Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=212.112.19.18:9998, ip=212.112.19.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sweden.crt, role=vpn, server=stockholm407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.314863733Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=solana, wallet_address=DRoxXLkV2sAsZb7bPbCJQnucTxvQCcfzfmThC3ZVNorv" t=2024-05-29T13:44:15.314789636Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.314551706Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.314566321Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.314421796Z caller=remote_instance_store.go:51 user=108112 slug=btctrader msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=108112 slug=btctrader instance= t=2024-05-29T13:44:15.314332963Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.314201811Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=629225 slug=bncloud t=2024-05-29T13:44:15.314242065Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.964988ms + level=debug ts=2024-05-29T13:44:15.31409562Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=108112 slug=btctrader version=6 fingerprint=2993201bceea313f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.3139682Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.313629672s EvaluationString:}]" duration=155.976067ms + logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=sepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C" t=2024-05-29T13:44:15.313682584Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.313559394Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.313466015Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:15.313356174Z level=debug msg="Saving alert states" count=11 max_state_save_concurrency=1 + logger=ngalert.state.manager user=491157 slug=prd01wr instance="country=RW" t=2024-05-29T13:44:15.313264771Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.313252161Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.313138221Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.360194ms + logger=ngalert.state.manager user=491157 slug=prd01wr instance="country=HR" t=2024-05-29T13:44:15.313137606Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=scrollsepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C" t=2024-05-29T13:44:15.313151061Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.313067411Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-veridas-db, env=us" t=2024-05-29T13:44:15.31299882Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=491157 slug=prd01wr instance="country=DE" t=2024-05-29T13:44:15.313031735Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=491157 slug=prd01wr instance="country=CA" t=2024-05-29T13:44:15.312991551Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=491157 slug=prd01wr instance="country=AU" t=2024-05-29T13:44:15.312950953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=491157 slug=prd01wr instance="country=AU" t=2024-05-29T13:44:15.312933959Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Southampton, country=United Kingdom, datacenter=DataPacket, environment=production, instance=98.159.234.42:9998, ip=98.159.234.42, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=southampton402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.312924023Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Southampton, country=United Kingdom, datacenter=DataPacket, environment=production, instance=98.159.234.42:9998, ip=98.159.234.42, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=southampton402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.312907333Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.310642885Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.312676595Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.31250421Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.31245606Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-v4monitoring-db, env=us" t=2024-05-29T13:44:15.312344362Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Southampton, country=United Kingdom, datacenter=DataPacket, environment=production, instance=98.159.234.129:9998, ip=98.159.234.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-southampton.crt, role=vpn, server=southampton404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.312298946Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-v4monitoring-db, env=us" t=2024-05-29T13:44:15.307514512Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538357 slug=volue instance="cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora" t=2024-05-29T13:44:15.312173592Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538357 slug=volue instance="cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora" t=2024-05-29T13:44:15.3121273Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538357 slug=volue t=2024-05-29T13:44:15.312073334Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=platform-kubernetes-prod" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.311949871Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=538357 slug=volue version=14 fingerprint=817c785b73f51d84 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.311552168Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.64.101:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-0, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-0.thedora-nodes.rabbitmq-system, service=thedora State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.64.101:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-0, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-0.thedora-nodes.rabbitmq-system, service=thedora Value:0xc016a4fd20} B:{Var:B Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.64.101:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-0, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-0.thedora-nodes.rabbitmq-system, service=thedora Value:0xc016a4ff38} C:{Var:C Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.64.101:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-0, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-0.thedora-nodes.rabbitmq-system, service=thedora Value:0xc049d80088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.310446228s EvaluationString:[ var='A' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.64.101:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-0, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-0.thedora-nodes.rabbitmq-system, service=thedora} value=28.99646760552238 ], [ var='B' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.64.101:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-0, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-0.thedora-nodes.rabbitmq-system, service=thedora} value=28.99646760552238 ], [ var='C' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.64.101:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-0, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-0.thedora-nodes.rabbitmq-system, service=thedora} value=0 ]} {Instance:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, 
rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora Value:0xc049d80340} B:{Var:B Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora Value:0xc049d80450} C:{Var:C Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora Value:0xc049d80588}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.310478056s EvaluationString:[ var='A' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora} value=27.959942832335333 ], [ var='B' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora} value=27.959942832335333 ], [ var='C' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.65.216:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-1, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-1.thedora-nodes.rabbitmq-system, service=thedora} value=0 ]} {Instance:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora Value:0xc049d807b8} B:{Var:B Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora Value:0xc049d80928} C:{Var:C Labels:cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora Value:0xc049d80a48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.310493308s EvaluationString:[ var='A' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora} value=26.400685324194267 ], [ var='B' labels={cluster=platform-kubernetes-prod, 
container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora} value=26.400685324194267 ], [ var='C' labels={cluster=platform-kubernetes-prod, container=rabbitmq, endpoint=prometheus-tls, instance=10.96.66.20:15691, job=thedora, namespace=rabbitmq-system, pod=thedora-server-2, rabbitmq_cluster=thedora, rabbitmq_node=rabbit@thedora-server-2.thedora-nodes.rabbitmq-system, service=thedora} value=0 ]}]" duration=23.416172ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.311751674Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.311591992Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.311554074Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.311453211Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.311441038Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.311373885Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.311352498Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + level=info ts=2024-05-29T13:44:15.310838344Z caller=remote_image_capturer.go:61 user=277462 slug=bayadic rule_org_id=1 rule_uid=f33d72c8-2a97-4e2c-a25a-a5a2541570e5 dashboard=eebef0bd-e9a1-472a-ae86-2cbc8d208dea panel=5 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.310801639Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.310788758Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=tempdb, dbinstance_identifier=lon-pp-frontend-database, env=pp" t=2024-05-29T13:44:15.307447874Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=344017 slug=descript instance="datasource_uid=4JVxmaNVk, ref_id=query" t=2024-05-29T13:44:15.310707784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=344017 slug=descript instance="datasource_uid=4JVxmaNVk, ref_id=query" t=2024-05-29T13:44:15.310700055Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=344017 slug=descript t=2024-05-29T13:44:15.31067089Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=316960 slug=mojamteam t=2024-05-29T13:44:15.310502986Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=79.818666ms + level=debug ts=2024-05-29T13:44:15.310242317Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.310135455Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.310109792Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=761298 slug=armcloudenablement t=2024-05-29T13:44:15.309888701Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.956045ms + level=debug ts=2024-05-29T13:44:15.309942091Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.309602204Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.309122755Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.309631415Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, datacenter=Interspace, environment=production, instance=185.225.31.18:9998, ip=185.225.31.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=macedonia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.308997824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.308914893Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, datacenter=Interspace, environment=production, instance=185.225.31.18:9998, ip=185.225.31.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mk.crt, role=vpn, server=macedonia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.308836172Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, datacenter=Interspace, environment=production, instance=185.225.31.18:9998, ip=185.225.31.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mk.crt, role=vpn, server=macedonia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.308824353Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.308770539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, datacenter=Interspace, environment=production, instance=185.225.28.226:9998, ip=185.225.28.226, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=macedonia401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.308666531Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, datacenter=Interspace, environment=production, instance=185.225.28.226:9998, ip=185.225.28.226, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mk.crt, role=vpn, server=macedonia401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.308517714Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.308481724Z level=warn 
msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.308308643Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.308288232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.308254816Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.308105617Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=156.146.57.38:9998, ip=156.146.57.38, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sg.crt, role=vpn, server=singapore403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.307957816Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=156.146.57.38:9998, ip=156.146.57.38, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sg.crt, role=vpn, server=singapore403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.307946601Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=156.146.57.171:9998, ip=156.146.57.171, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sg.crt, role=vpn, server=singapore401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.307553924Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=156.146.57.171:9998, ip=156.146.57.171, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=singapore401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.307337043Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.309307383Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 
slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=156.146.57.102:9998, ip=156.146.57.102, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sg.crt, role=vpn, server=singapore402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.307139781Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=156.146.57.102:9998, ip=156.146.57.102, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sg.crt, role=vpn, server=singapore402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.307126649Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.309316587Z caller=remote_instance_store.go:51 user=714711 slug=nomiai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=138.199.24.92:9998, ip=138.199.24.92, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/sg.crt, role=vpn, server=singapore404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.306747498Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=714711 slug=nomiai instance="file_id=selfie-generator-5:3.1.10, hostname=hinton02" t=2024-05-29T13:44:15.309222076Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=138.199.24.92:9998, ip=138.199.24.92, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=singapore404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.306555311Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=714711 slug=nomiai instance="file_id=selfie-generator-1:3.1.10, hostname=hinton02" t=2024-05-29T13:44:15.309196486Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=TSS / Performive, environment=production, instance=66.115.165.66:9998, ip=66.115.165.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.306401611Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538355 slug=flogic instance="__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-0b4d233ecc17a59c9, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-0b4d233ecc17a59c9, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" 
t=2024-05-29T13:44:15.308978518Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.309028258Z caller=remote_instance_store.go:51 user=538355 slug=flogic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538355 slug=flogic t=2024-05-29T13:44:15.308925549Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.308752224Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=174016 slug=journalstaging version=1 fingerprint=d378f9b58e660bb7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.308571416Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bYQmLgyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.308336075s EvaluationString:}]" duration=18.007012ms + level=debug ts=2024-05-29T13:44:15.308538696Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.308459183Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.30844971Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.30840922Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=scroll, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.30844823Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=scroll, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.308414116Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.307829306Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:15.307895289Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=432323 slug=lithic version=10 fingerprint=27cfe20011466e7b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.307778972Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.307370025s EvaluationString:}]" duration=16.148177ms + level=debug ts=2024-05-29T13:44:15.307819687Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=321534 slug=atlasiq t=2024-05-29T13:44:15.307712675Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:15.307705737Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.307715643Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=615392 slug=shinemetrics t=2024-05-29T13:44:15.307536961Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=22.795149ms + level=debug ts=2024-05-29T13:44:15.307541469Z caller=remote_instance_store.go:51 user=738479 slug=gohero 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.307640919Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z + level=debug ts=2024-05-29T13:44:15.307446388Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.307423805Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=polygonzkevm" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-sar-investigation-db, env=us" t=2024-05-29T13:44:15.307336418Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=tempdb, dbinstance_identifier=lon-dev-frontend-db, env=dev" t=2024-05-29T13:44:15.307202659Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.307016249Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=tempdb, dbinstance_identifier=fran-prod-frontend-db, env=eu" t=2024-05-29T13:44:15.306907573Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.306875155Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.306836931Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.306644703Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.306596171Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=polygon, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.30667034Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.306536623Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="chain=polygon"
+ logger=ngalert.state.manager.persist user=811546 slug=fyld t=2024-05-29T13:44:15.306430227Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=36.918759ms
+ level=debug ts=2024-05-29T13:44:15.306193608Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.306064463Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.306051536Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=TSS / Performive, environment=production, instance=66.115.165.2:9998, ip=66.115.165.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.306019231Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=optimism, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.305992254Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=TSS / Performive, environment=production, instance=66.115.165.135:9998, ip=66.115.165.135, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.30560247Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.305911898Z caller=remote_instance_store.go:51 user=148654 slug=tinybeans msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=TSS / Performive, environment=production, instance=66.115.165.135:9998, ip=66.115.165.135, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=siliconvalley421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.305381093Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.305352197Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.305247923Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=optimism, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.305909732Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=191.96.255.96:9998, ip=191.96.255.96, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.305121862Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=191.96.255.96:9998, ip=191.96.255.96, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=siliconvalley416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.30495439Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=191.96.255.96:9998, ip=191.96.255.96, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=siliconvalley416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.304945276Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.304905178Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.304897976Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.304577773Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.304412963Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=191.96.255.3:9998, ip=191.96.255.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-streaming-2.crt, role=streaming-optimized, server=siliconvalley414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.30439314Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.305735093Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=optimism"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.304251303Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.304140931Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=191.96.255.190:9998, ip=191.96.255.190, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=siliconvalley418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.303741656Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.303731516Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=191.96.255.143:9998, ip=191.96.255.143, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.303529152Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=191.96.255.143:9998, ip=191.96.255.143, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.303516569Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.303219631Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Chicago, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.303001801Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=102.129.252.97:9998, ip=102.129.252.97, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us3.crt, role=vpn, server=siliconvalley410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.302992946Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Chicago, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.30298858Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.302959626Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Caracas, country=Venezuela, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.302793975Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.302773841Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.30260361Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Buenos Aires, country=Argentina, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.302459144Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=901230 slug=integromonitor t=2024-05-29T13:44:15.305285334Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.29175ms
+ level=debug ts=2024-05-29T13:44:15.305084632Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=neutron, wallet_address=neutron1zk7wenqypcucqyl8c0uf3hxan7ld2pkzyhamf2" t=2024-05-29T13:44:15.305098389Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=211627 slug=beoogo instance="DBInstanceIdentifier=benson" t=2024-05-29T13:44:15.305077443Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.304953242Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.304775506Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.304758133Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.304718306Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=error ts=2024-05-29T13:44:15.304707842Z caller=ruler.go:515 msg="failed to load config from grafana instance, skipping instance" user=366558 slug=dojoazure err="user has the remote ruler not enabled"
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=staging" t=2024-05-29T13:44:15.304684574Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.304608709Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=nc-services-prod" t=2024-05-29T13:44:15.304632101Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.30451961Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=internal" t=2024-05-29T13:44:15.304558051Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=ford-cluster" t=2024-05-29T13:44:15.304444917Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=external-eu-west-new" t=2024-05-29T13:44:15.304310322Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=external-eu-west-new" t=2024-05-29T13:44:15.304297392Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.304292081Z caller=remote_instance_store.go:51 user=561712 slug=latamairlinespci msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=561712 slug=latamairlinespci instance= t=2024-05-29T13:44:15.304232226Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=561712 slug=latamairlinespci t=2024-05-29T13:44:15.304195802Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.303804549Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:15.303757154Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=359640 slug=swfseu t=2024-05-29T13:44:15.303677807Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.303684281Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.3036101Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=491157 slug=prd01wr instance="DBInstanceIdentifier=prod-zepz-darkwing-database-2" t=2024-05-29T13:44:15.303535954Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=491157 slug=prd01wr instance="DBInstanceIdentifier=prod-zepz-darkwing-database-1" t=2024-05-29T13:44:15.303493136Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.30346051Z caller=remote_instance_store.go:51 user=270953 slug=moisesai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=270953 slug=moisesai t=2024-05-29T13:44:15.303415534Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=270953 slug=moisesai instance="datasource_uid=Sx1bdXF7k, ref_id=A" t=2024-05-29T13:44:15.30340209Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=270953 slug=moisesai instance="datasource_uid=Sx1bdXF7k, ref_id=A" t=2024-05-29T13:44:15.303389728Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=270953 slug=moisesai t=2024-05-29T13:44:15.303377654Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.303345631Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=moonbeam"
+ logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:15.303346386Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.057029ms
+ logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=21cfc5743dfa0cd0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.303166039Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=prod-zepz-darkwing-database-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=prod-zepz-darkwing-database-0 Value:0xc0187b5830} C:{Var:C Labels:DBInstanceIdentifier=prod-zepz-darkwing-database-0 Value:0xc0187b5828}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.302627006s EvaluationString:[ var='B' labels={DBInstanceIdentifier=prod-zepz-darkwing-database-0} value=6.709957124821342 ], [ var='C' labels={DBInstanceIdentifier=prod-zepz-darkwing-database-0} value=0 ]} {Instance:DBInstanceIdentifier=prod-zepz-darkwing-database-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=prod-zepz-darkwing-database-1 Value:0xc0187b5848} C:{Var:C Labels:DBInstanceIdentifier=prod-zepz-darkwing-database-1 Value:0xc0187b5840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.302646514s EvaluationString:[ var='B' labels={DBInstanceIdentifier=prod-zepz-darkwing-database-1} value=0 ], [ var='C' labels={DBInstanceIdentifier=prod-zepz-darkwing-database-1} value=0 ]} {Instance:DBInstanceIdentifier=prod-zepz-darkwing-database-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=prod-zepz-darkwing-database-2 Value:0xc0187b5858} C:{Var:C Labels:DBInstanceIdentifier=prod-zepz-darkwing-database-2 Value:0xc0187b5880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.302653666s EvaluationString:[ var='B' labels={DBInstanceIdentifier=prod-zepz-darkwing-database-2} value=0 ], [ var='C' labels={DBInstanceIdentifier=prod-zepz-darkwing-database-2} value=0 ]} {Instance:DBInstanceIdentifier=prod-zepz-remitsrv-database-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=prod-zepz-remitsrv-database-0 Value:0xc0187b5910} C:{Var:C Labels:DBInstanceIdentifier=prod-zepz-remitsrv-database-0 Value:0xc0187b5918}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.302665742s EvaluationString:[ var='B' labels={DBInstanceIdentifier=prod-zepz-remitsrv-database-0} value=0 ], [ var='C' labels={DBInstanceIdentifier=prod-zepz-remitsrv-database-0} value=0 ]} {Instance:DBInstanceIdentifier=prod-zepz-remitsrv-database-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=prod-zepz-remitsrv-database-1 Value:0xc0187b5928} C:{Var:C Labels:DBInstanceIdentifier=prod-zepz-remitsrv-database-1 Value:0xc0187b5a50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.30267404s EvaluationString:[ var='B' labels={DBInstanceIdentifier=prod-zepz-remitsrv-database-1} value=0 ], [ var='C' labels={DBInstanceIdentifier=prod-zepz-remitsrv-database-1} value=0 ]} {Instance:DBInstanceIdentifier=prod-zepz-remitsrv-database-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=prod-zepz-remitsrv-database-2 Value:0xc0187b5b50} C:{Var:C Labels:DBInstanceIdentifier=prod-zepz-remitsrv-database-2 Value:0xc0187b5b58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.302684208s EvaluationString:[ var='B' labels={DBInstanceIdentifier=prod-zepz-remitsrv-database-2} value=3.2333457065839064 ], [ var='C' labels={DBInstanceIdentifier=prod-zepz-remitsrv-database-2} value=0 ]}]" duration=165.570093ms
+ level=debug ts=2024-05-29T13:44:15.303284313Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=629225 slug=bncloud t=2024-05-29T13:44:15.303106896Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=162543 slug=rapharacing t=2024-05-29T13:44:15.303064529Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=162543 slug=rapharacing instance= t=2024-05-29T13:44:15.303030131Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=162543 slug=rapharacing instance= t=2024-05-29T13:44:15.303016997Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=162543 slug=rapharacing t=2024-05-29T13:44:15.302996154Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.303011674Z caller=remote_instance_store.go:51 user=115097 slug=controlplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=115097 slug=controlplane t=2024-05-29T13:44:15.302984822Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:15.302973215Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=115097 slug=controlplane t=2024-05-29T13:44:15.302941586Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Knative Autoscaler Leader Election Errors: location {{ location }}': error parsing template __alert_TEST Knative Autoscaler Leader Election Errors alert: template: __alert_TEST Knative Autoscaler Leader Election Errors alert:1: function \"location\" not defined"
+ level=debug ts=2024-05-29T13:44:15.302782556Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.302805487Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=mantapacific, wallet_address=03787bc64a4f352b4ad172947473342028513ef3" t=2024-05-29T13:44:15.302778407Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.302566274Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.302546195Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:15.302469634Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:15.302429495Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=102.129.252.51:9998, ip=102.129.252.51, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=siliconvalley409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.30241382Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.302297698Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.302352235Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.302292023Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=155740 slug=routific version=5 fingerprint=25692ce66739b800 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.302155062Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.301641698s EvaluationString:}]" duration=100.104386ms
+ level=debug ts=2024-05-29T13:44:15.302182788Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=467407 slug=agentsoftware instance="EnvironmentName=spectre-prod-82-b" t=2024-05-29T13:44:15.301942325Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=467407 slug=agentsoftware t=2024-05-29T13:44:15.301894572Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.30193356Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.301386666Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:15.301830164Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=tempdb, dbinstance_identifier=fran-prod-frontend-db, env=eu" t=2024-05-29T13:44:15.301760727Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:15.30177631Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.301736249Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=injective, wallet_address=inj1txlawhefw0qrnk7t38w4chf4adcpkja0yc88r4" t=2024-05-29T13:44:15.301718522Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=rdsadmin, dbinstance_identifier=syd-prod-frontend-db, env=au" t=2024-05-29T13:44:15.301559883Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.301368926Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.301337486Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:15.30132434Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.30111684Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=inevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.301136065Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-zombie-survival, pod_name=s-zombie-survival-70b0-2" t=2024-05-29T13:44:15.3010837Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.300968473Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=inevm"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-zombie-survival, pod_name=s-zombie-survival-70b0-1" t=2024-05-29T13:44:15.30096534Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.300922701Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-zombie-survival, pod_name=s-zombie-survival-70b0-1" t=2024-05-29T13:44:15.300955054Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=rdsadmin, dbinstance_identifier=ohio-prod-frontend-db, env=us" t=2024-05-29T13:44:15.300835182Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-word-jam, pod_name=s-word-jam-3ef7-2" t=2024-05-29T13:44:15.300809926Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.300785435Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.300736864Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=102.129.252.221:9998, ip=102.129.252.221, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=siliconvalley413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.300694055Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.300620401Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-webbi-boi, pod_name=s-webbi-boi-f2aa-2" t=2024-05-29T13:44:15.300576075Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.300368621Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=gnosis, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.300418734Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-frontend-log-db, env=us" t=2024-05-29T13:44:15.299270905Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.300211624Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=196413 slug=form3production version=3 fingerprint=e8d7d96bb552af1f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.299753631Z level=debug msg="Alert rule evaluated" results="[{Instance:Region=eu-west-2, ServiceLimit=Active snapshots, ServiceName=EBS State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Region=eu-west-2, ServiceLimit=Active snapshots, ServiceName=EBS Value:0xc0408a53c0} C:{Var:C Labels:Region=eu-west-2, ServiceLimit=Active snapshots, ServiceName=EBS Value:0xc0408a5410}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.299280789s EvaluationString:[ var='B' labels={Region=eu-west-2, ServiceLimit=Active snapshots, ServiceName=EBS} value=3e-05 ], [ var='C' labels={Region=eu-west-2, ServiceLimit=Active snapshots, ServiceName=EBS} value=0 ]}]" duration=125.605133ms
+ level=debug ts=2024-05-29T13:44:15.30008752Z caller=remote_instance_store.go:51 user=254872 slug=ennowallet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=254872 slug=ennowallet t=2024-05-29T13:44:15.300049057Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-vroom, pod_name=s-vroom-b994-2" t=2024-05-29T13:44:15.299955742Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-vroom, pod_name=s-vroom-b994-2" t=2024-05-29T13:44:15.299942559Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.299695747Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.299510438Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-tile-garden, pod_name=s-tile-garden-be30-2" t=2024-05-29T13:44:15.299518762Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=rdsadmin, dbinstance_identifier=fran-prod-frontend-db, env=eu" t=2024-05-29T13:44:15.299500431Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=rdsadmin, dbinstance_identifier=fran-prod-frontend-db, env=eu" t=2024-05-29T13:44:15.299487066Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.298711322Z caller=remote_instance_store.go:51 user=526835 slug=fundbot msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.299400162Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=461396 slug=ultimateai t=2024-05-29T13:44:15.29938446Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=64.217357ms
+ logger=ngalert.state.historian backend=loki user=752743 slug=andreydmitr20 t=2024-05-29T13:44:15.29932697Z level=debug msg="Done saving alert state history batch"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-tile-garden, pod_name=s-tile-garden-be30-1" t=2024-05-29T13:44:15.299321706Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=cd2cbf97c01e1ac3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.299270092Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.299017572s EvaluationString:}]" duration=285.977917ms
+ logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.299144425Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-the-real-juggle, pod_name=s-the-real-juggle-0ae1-2" t=2024-05-29T13:44:15.299164729Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.299128501Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-the-real-juggle, pod_name=s-the-real-juggle-0ae1-2" t=2024-05-29T13:44:15.299155734Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.298612522Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="code=499" t=2024-05-29T13:44:15.299081204Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-frontend-db-read-replica-1, env=us" t=2024-05-29T13:44:15.299049439Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-frontend-db-read-replica-1, env=us" t=2024-05-29T13:44:15.298999694Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=ethereum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.298913033Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=outsystems_log, dbinstance_identifier=lon-dev-frontend-db, env=dev" t=2024-05-29T13:44:15.295314261Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-super-salon, pod_name=s-super-salon-2edb-2" t=2024-05-29T13:44:15.298782108Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.298678541Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.298610767Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-dow-jones-db, env=us" t=2024-05-29T13:44:15.298536632Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:15.298481998Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=DLQ_DBS-Core-InterpretadorLTR.fifo"
+ logger=ngalert.scheduler user=191103 slug=amazonadmin version=3 fingerprint=7c940efbd05dba5d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.298400347Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.298189703s EvaluationString:}]" duration=614.418703ms
+ logger=ngalert.state.manager user=462905 slug=skra instance= t=2024-05-29T13:44:15.298351241Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=celo, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.298378783Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:15.298229118Z caller=remote_alert_sender.go:94 user=752743 slug=andreydmitr20 host=andreydmitr20-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.222.165:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdfmqcdayla0wa alerts=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.29829259Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-data-management-db, env=us" t=2024-05-29T13:44:15.298312959Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=462905 slug=skra version=19 fingerprint=bc5c4d257fbbb81a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.298069536Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc006ed4bd0} C:{Var:C Labels: Value:0xc006ed4bd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.297619116s EvaluationString:[ var='B' labels={} value=0 ], [ var='C' labels={} value=0 ]}]" duration=157.596389ms
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-serenity-spa, pod_name=s-serenity-spa-ee1b-2" t=2024-05-29T13:44:15.298108652Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.297718114Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-creditsafe-db, env=us" t=2024-05-29T13:44:15.298018295Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=244660 slug=sideshift t=2024-05-29T13:44:15.29800743Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=761298 slug=armcloudenablement t=2024-05-29T13:44:15.297926595Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.297707528Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=34.657054ms
+ level=debug ts=2024-05-29T13:44:15.297679779Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-comply-advantage-db, env=us" t=2024-05-29T13:44:15.297653Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.297503954Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=bsctestnet"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-papers-grade, pod_name=s-papers-grade-863e-2" t=2024-05-29T13:44:15.297462444Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=402122 slug=leapwallet t=2024-05-29T13:44:15.297353721Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.622858ms
+ level=debug ts=2024-05-29T13:44:15.29735888Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-papers-grade, pod_name=s-papers-grade-863e-1" t=2024-05-29T13:44:15.2972619Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485348 slug=holygrolli t=2024-05-29T13:44:15.297198705Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.297191635Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.297064136Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-billing-db, env=us" t=2024-05-29T13:44:15.297101029Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-mergelife, pod_name=s-mergelife-b5c5-2" t=2024-05-29T13:44:15.297066012Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=444725 slug=devnextgen t=2024-05-29T13:44:15.297028567Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.302807ms
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=bsc, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.297070526Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.297064756Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-billing-db, env=us" t=2024-05-29T13:44:15.297017695Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID845dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.296830645Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=bsc"
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.296765709Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=56.266236ms
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-backend-db-read-replica-1, env=us" t=2024-05-29T13:44:15.296808215Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=5 fingerprint=a432e19d7b5ad66f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.296781514Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.947227ms
+ level=error ts=2024-05-29T13:44:15.296580366Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-backend-db, env=us" t=2024-05-29T13:44:15.296465835Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.29635777Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=base, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.29634849Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.296299957Z caller=remote_instance_store.go:51 user=868411 slug=cmpladnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.296287335Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-loot-inc, pod_name=s-loot-inc-614a-2" t=2024-05-29T13:44:15.296268834Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-veridas-db, env=qa" t=2024-05-29T13:44:15.296251424Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=102.129.232.3:9998, ip=102.129.232.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=siliconvalley401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.296240893Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=351895 slug=abacusworks t=2024-05-29T13:44:15.296184334Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=base"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-veridas-db, env=qa" t=2024-05-29T13:44:15.296203714Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.296126958Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.296095547Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-loot-inc, pod_name=s-loot-inc-614a-1" t=2024-05-29T13:44:15.296062652Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.295811259Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-king-wing, pod_name=s-king-wing-7dd7-2" t=2024-05-29T13:44:15.2958488Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=447897 slug=mysten t=2024-05-29T13:44:15.295785217Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=32.209807ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=102.129.232.221:9998, ip=102.129.232.221, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.295857039Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.29574494Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=avalanche, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.295711935Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.295684297Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.295507417Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.295475681Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=806229 slug=simplisafe version=33 fingerprint=39b2243194171596 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.295315555Z level=debug msg="Alert rule evaluated" results="[{Instance:FunctionName=prd-kamino-mongo-dns-update State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:FunctionName=prd-kamino-mongo-dns-update Value:0xc091d37f48} B:{Var:B Labels:FunctionName=prd-kamino-mongo-dns-update Value:0xc091d37f40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.2949512s EvaluationString:[ var='A' labels={FunctionName=prd-kamino-mongo-dns-update} value=0 ], [ var='B' labels={FunctionName=prd-kamino-mongo-dns-update} value=0 ]}]" duration=66.644723ms
+ level=debug ts=2024-05-29T13:44:15.295263297Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-hooked-inc, pod_name=s-hooked-inc-43c4-1" t=2024-05-29T13:44:15.295317305Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-hooked-inc, pod_name=s-hooked-inc-43c4-1" t=2024-05-29T13:44:15.2953034Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-risk-defense-platform-db, env=qa" t=2024-05-29T13:44:15.295287001Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.295272559Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.295117616Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=arbitrum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.295094008Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.295047447Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:15.295049206Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.467614ms
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=arbitrum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba" t=2024-05-29T13:44:15.294925157Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-heroes-inc, pod_name=s-heroes-inc-e5cc-1" t=2024-05-29T13:44:15.294855208Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-heroes-inc, pod_name=s-heroes-inc-e5cc-1" t=2024-05-29T13:44:15.294835407Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-periodic-reviews-db, env=qa" t=2024-05-29T13:44:15.286772307Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=102.129.232.189:9998, ip=102.129.232.189, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-streaming-2.crt, role=streaming-optimized, server=siliconvalley407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.294863495Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.294820901Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-gladiators-arena, pod_name=s-gladiators-arena-e91f-2" t=2024-05-29T13:44:15.294622515Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=230279 slug=buckless instance="cluster_type=cluster, instance=cluster main fc2d1997, node_role=main, resource_id=356f6296-2d78-4e4b-82c9-3f549425626b, resource_name=database" t=2024-05-29T13:44:15.294574458Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.294345898Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-gladiators-arena, pod_name=s-gladiators-arena-e91f-1" t=2024-05-29T13:44:15.294405747Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.294315002Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, datacenter=PIA, environment=production, instance=102.129.232.143:9998, ip=102.129.232.143, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-siliconvalley.crt, role=vpn, server=siliconvalley404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.294254792Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.294122137Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-found-it, pod_name=s-found-it-8086-3" t=2024-05-29T13:44:15.294195262Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.294147338Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-found-it, pod_name=s-found-it-8086-1" t=2024-05-29T13:44:15.294023991Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-found-it, pod_name=s-found-it-8086-1" t=2024-05-29T13:44:15.294008968Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.293699096Z caller=remote_instance_store.go:51 user=129403 slug=sitechsahc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=149505 slug=stanislavgerasimov8 t=2024-05-29T13:44:15.293699739Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=alfajores, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C" t=2024-05-29T13:44:15.293717755Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=149505 slug=stanislavgerasimov8 instance= t=2024-05-29T13:44:15.293670982Z level=warn msg="Failed to take an image" dashboard=CWter8iMz panel=30 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=351895 slug=abacusworks instance="chain=alfajores, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C" t=2024-05-29T13:44:15.293661127Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.293623839Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-flip-trickster, pod_name=s-flip-trickster-4d14-1" t=2024-05-29T13:44:15.293602862Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-flip-trickster, pod_name=s-flip-trickster-4d14-1" t=2024-05-29T13:44:15.293590928Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.293474301Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:15.293520177Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=237629 slug=ocrolus instance="__name__=kube_deployment_status_replicas_available, app_kubernetes_io_component=metrics, app_kubernetes_io_instance=devops-prometheus, app_kubernetes_io_managed_by=Helm, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_part_of=kube-state-metrics, app_kubernetes_io_version=2.12.0, cluster=production01, deployment=prod-dashboard-bff-http, environment=prod, helm_sh_chart=kube-state-metrics-5.18.1, instance=10.160.68.74:8080, job=kubernetes-service-endpoints, kubernetes_name=devops-prometheus-kube-state-metrics, kubernetes_namespace=devops, kubernetes_node=ip-10-160-79-108.ec2.internal, namespace=prod" t=2024-05-29T13:44:15.293488844Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=237629 slug=ocrolus t=2024-05-29T13:44:15.293444983Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=prod"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Seoul, country=South Korea, datacenter=GSL, environment=production, instance=84.247.102.2:9998, ip=84.247.102.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=seoul401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.293451361Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.293311869Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.293288701Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-dingbats, pod_name=s-dingbats-34f9-1" t=2024-05-29T13:44:15.29326485Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.293114772Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.293013633Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-cube-zoo, pod_name=s-cube-zoo-0581-1" t=2024-05-29T13:44:15.292880971Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-cube-zoo, pod_name=s-cube-zoo-0581-1" t=2024-05-29T13:44:15.292870827Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.292830552Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.292792003Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=351895 slug=abacusworks version=111 fingerprint=8fc916c818a37ce6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.292144164Z level=debug msg="Alert rule evaluated" results="[{Instance:chain=alfajores, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=alfajores, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9a470} B:{Var:B Labels:chain=alfajores, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9a440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288880881s EvaluationString:[ var='A' labels={chain=alfajores, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=18.006429360368134 ], [ var='B' labels={chain=alfajores, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=0 ]} {Instance:chain=arbitrum, wallet_address=03787bc64a4f352b4ad172947473342028513ef3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=arbitrum, wallet_address=03787bc64a4f352b4ad172947473342028513ef3 Value:0xc055a9a4c0} B:{Var:B Labels:chain=arbitrum, wallet_address=03787bc64a4f352b4ad172947473342028513ef3 Value:0xc055a9a4d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288901467s EvaluationString:[ var='A' labels={chain=arbitrum, wallet_address=03787bc64a4f352b4ad172947473342028513ef3} value=1.408674685513923 ], [ var='B' labels={chain=arbitrum, wallet_address=03787bc64a4f352b4ad172947473342028513ef3} value=0 ]} {Instance:chain=arbitrum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=arbitrum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a560} B:{Var:B Labels:chain=arbitrum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288908752s EvaluationString:[ var='A' labels={chain=arbitrum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=14.348401206507214 ], [ var='B' labels={chain=arbitrum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=avalanche, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=avalanche, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a5e0} B:{Var:B Labels:chain=avalanche, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a5b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288914747s EvaluationString:[ var='A' labels={chain=avalanche, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=89.43573862860842 ], [ var='B' labels={chain=avalanche, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=base, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=base, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a618} B:{Var:B Labels:chain=base, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288923955s EvaluationString:[ var='A' labels={chain=base, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=8.718211452444216 ], [ var='B' labels={chain=base, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=bsc, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=bsc, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a710} B:{Var:B Labels:chain=bsc, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a6f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288933675s EvaluationString:[ var='A' labels={chain=bsc, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=28.52711694664987 ], [ var='B' labels={chain=bsc, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=bsctestnet, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=bsctestnet, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9a760} B:{Var:B Labels:chain=bsctestnet, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9a790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288940552s EvaluationString:[ var='A' labels={chain=bsctestnet, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=46.59663718651032 ], [ var='B' labels={chain=bsctestnet, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=0 ]} {Instance:chain=celo, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[]
Values:map[A:{Var:A Labels:chain=celo, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a7c8} B:{Var:B Labels:chain=celo, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288949753s EvaluationString:[ var='A' labels={chain=celo, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=27906.20461307882 ], [ var='B' labels={chain=celo, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=ethereum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=ethereum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a868} B:{Var:B Labels:chain=ethereum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288956268s EvaluationString:[ var='A' labels={chain=ethereum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=1.832926337563685 ], [ var='B' labels={chain=ethereum, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=fuji, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=fuji, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9a8c0} B:{Var:B Labels:chain=fuji, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9a8d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288963127s EvaluationString:[ var='A' labels={chain=fuji, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=209.18695841450747 ], [ var='B' labels={chain=fuji, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=0 ]} {Instance:chain=gnosis, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=gnosis, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a928} B:{Var:B Labels:chain=gnosis, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288970867s EvaluationString:[ var='A' labels={chain=gnosis, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=5348.143211576994 ], [ var='B' labels={chain=gnosis, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=inevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=inevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a9b0} B:{Var:B Labels:chain=inevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9a9c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288976944s EvaluationString:[ var='A' labels={chain=inevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=1093.9606410366866 ], [ var='B' labels={chain=inevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=injective, wallet_address=inj1txlawhefw0qrnk7t38w4chf4adcpkja0yc88r4 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=injective, wallet_address=inj1txlawhefw0qrnk7t38w4chf4adcpkja0yc88r4 Value:0xc055a9aa30} B:{Var:B Labels:chain=injective, wallet_address=inj1txlawhefw0qrnk7t38w4chf4adcpkja0yc88r4 Value:0xc055a9aa90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.288988023s EvaluationString:[ var='A' labels={chain=injective, wallet_address=inj1txlawhefw0qrnk7t38w4chf4adcpkja0yc88r4} value=6.69351612839226e+12 ], [ var='B' labels={chain=injective, wallet_address=inj1txlawhefw0qrnk7t38w4chf4adcpkja0yc88r4} value=0 ]} {Instance:chain=mantapacific, wallet_address=03787bc64a4f352b4ad172947473342028513ef3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=mantapacific, wallet_address=03787bc64a4f352b4ad172947473342028513ef3 Value:0xc055a9ab10} B:{Var:B Labels:chain=mantapacific, wallet_address=03787bc64a4f352b4ad172947473342028513ef3 Value:0xc055a9ab50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.288994947s EvaluationString:[ var='A' labels={chain=mantapacific, wallet_address=03787bc64a4f352b4ad172947473342028513ef3} value=0.010210052601970943 ], [ var='B' labels={chain=mantapacific, wallet_address=03787bc64a4f352b4ad172947473342028513ef3} value=0 ]} {Instance:chain=moonbeam, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=moonbeam, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9abc8} B:{Var:B Labels:chain=moonbeam, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9aba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289002202s EvaluationString:[ var='A' labels={chain=moonbeam, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=29366.632275492095 ], [ var='B' labels={chain=moonbeam, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=nautilus, wallet_address=b8042e54a969b8fd702910e82907964b50630eb4 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=nautilus, wallet_address=b8042e54a969b8fd702910e82907964b50630eb4 Value:0xc055a9ac40} B:{Var:B Labels:chain=nautilus, wallet_address=b8042e54a969b8fd702910e82907964b50630eb4 Value:0xc055a9ac08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289007228s EvaluationString:[ var='A' labels={chain=nautilus, wallet_address=b8042e54a969b8fd702910e82907964b50630eb4} value=30622.20192248 ], [ var='B' labels={chain=nautilus, wallet_address=b8042e54a969b8fd702910e82907964b50630eb4} value=0 ]} {Instance:chain=neutron, wallet_address=neutron1zk7wenqypcucqyl8c0uf3hxan7ld2pkzyhamf2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=neutron, wallet_address=neutron1zk7wenqypcucqyl8c0uf3hxan7ld2pkzyhamf2 Value:0xc055a9ac90} B:{Var:B Labels:chain=neutron, wallet_address=neutron1zk7wenqypcucqyl8c0uf3hxan7ld2pkzyhamf2 Value:0xc055a9aca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289013455s EvaluationString:[ var='A' labels={chain=neutron, wallet_address=neutron1zk7wenqypcucqyl8c0uf3hxan7ld2pkzyhamf2} value=3804.199477 ], [ var='B' labels={chain=neutron, wallet_address=neutron1zk7wenqypcucqyl8c0uf3hxan7ld2pkzyhamf2} value=0 ]} {Instance:chain=optimism, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=optimism, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9ad08} B:{Var:B Labels:chain=optimism, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9ad30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289018841s EvaluationString:[ var='A' labels={chain=optimism, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=6.4470149180190806 ], [ var='B' labels={chain=optimism, 
wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=polygon, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=polygon, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9ad80} B:{Var:B Labels:chain=polygon, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9ad98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289025233s EvaluationString:[ var='A' labels={chain=polygon, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=41120.149362895696 ], [ var='B' labels={chain=polygon, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=polygonzkevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=polygonzkevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9ade8} B:{Var:B Labels:chain=polygonzkevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9ae18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289034229s EvaluationString:[ var='A' labels={chain=polygonzkevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=1.9447439954791856 ], [ var='B' labels={chain=polygonzkevm, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=scroll, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=scroll, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9aeb0} B:{Var:B Labels:chain=scroll, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba Value:0xc055a9ae78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289044073s EvaluationString:[ var='A' labels={chain=scroll, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=3.443222510146308 ], [ var='B' labels={chain=scroll, wallet_address=0xa7ECcdb9Be08178f896c26b7BbD8C3D4E844d9Ba} value=0 ]} {Instance:chain=scrollsepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=scrollsepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9af40} B:{Var:B Labels:chain=scrollsepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9af10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289050785s EvaluationString:[ var='A' labels={chain=scrollsepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=8.298137322746504 ], [ var='B' labels={chain=scrollsepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=0 ]} {Instance:chain=sepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=sepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9af90} B:{Var:B Labels:chain=sepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C Value:0xc055a9afa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289057916s EvaluationString:[ var='A' labels={chain=sepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=960.7338748836659 ], [ var='B' labels={chain=sepolia, wallet_address=0xfaD1C94469700833717Fa8a3017278BC1cA8031C} value=0 ]} {Instance:chain=solana, wallet_address=CLT7m1JBCqJx6FM3obi9ohTnML1zmdzFcX2SgWj1D8qV State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:chain=solana, wallet_address=CLT7m1JBCqJx6FM3obi9ohTnML1zmdzFcX2SgWj1D8qV Value:0xc055a9aff8} B:{Var:B Labels:chain=solana, wallet_address=CLT7m1JBCqJx6FM3obi9ohTnML1zmdzFcX2SgWj1D8qV Value:0xc055a9b030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289063708s EvaluationString:[ var='A' labels={chain=solana, wallet_address=CLT7m1JBCqJx6FM3obi9ohTnML1zmdzFcX2SgWj1D8qV} value=4.791061207 ], [ var='B' labels={chain=solana, wallet_address=CLT7m1JBCqJx6FM3obi9ohTnML1zmdzFcX2SgWj1D8qV} value=0 ]} {Instance:chain=solana, wallet_address=DRoxXLkV2sAsZb7bPbCJQnucTxvQCcfzfmThC3ZVNorv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=solana, wallet_address=DRoxXLkV2sAsZb7bPbCJQnucTxvQCcfzfmThC3ZVNorv Value:0xc055a9b080} B:{Var:B Labels:chain=solana, wallet_address=DRoxXLkV2sAsZb7bPbCJQnucTxvQCcfzfmThC3ZVNorv Value:0xc055a9b098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.28907131s EvaluationString:[ var='A' labels={chain=solana, wallet_address=DRoxXLkV2sAsZb7bPbCJQnucTxvQCcfzfmThC3ZVNorv} value=2.14280993 ], [ var='B' labels={chain=solana, wallet_address=DRoxXLkV2sAsZb7bPbCJQnucTxvQCcfzfmThC3ZVNorv} value=0 ]}]" duration=75.099878ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=outsystems, dbinstance_identifier=ohio-prod-frontend-db, env=us" t=2024-05-29T13:44:15.292681407Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.292546784Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:15.292478604Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=129403 slug=sitechsahc instance= t=2024-05-29T13:44:15.292520657Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.scheduler user=460915 slug=funrise version=26 fingerprint=cb81313161504940 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.292475583Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=node-exporter-ev-worker-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=node-exporter-ev-worker-1 Value:0xc0402b0418} C:{Var:C Labels:instance=node-exporter-ev-worker-1 Value:0xc0402b0428}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.292179088s EvaluationString:[ var='B' labels={instance=node-exporter-ev-worker-1} value=68.42153825953677 ], [ var='C' labels={instance=node-exporter-ev-worker-1} value=0 ]}]" duration=19.595592ms + logger=ngalert.scheduler user=129403 slug=sitechsahc version=1 fingerprint=9418495b6439b1d9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.292455141Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': can not get data source by uid, uid is empty" duration=887.689µs + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.292438486Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.441586ms + level=error ts=2024-05-29T13:44:15.292419541Z caller=remote_rule_evaluator.go:110 user=129403 slug=sitechsahc msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': can not get data source by uid, uid is empty" + logger=ngalert.state.manager.persist user=363785 slug=moonletmonitor 
t=2024-05-29T13:44:15.292366427Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.886398ms + level=debug ts=2024-05-29T13:44:15.292165949Z caller=remote_instance_store.go:51 user=320778 slug=omegaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.292366566Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:15.292341509Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.29227779Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:15.292093892Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=320778 slug=omegaai version=1 fingerprint=721848d22784f1d0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.290327067Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.289777545s EvaluationString:}]" duration=46.074764ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.291969202Z level=debug msg="State manager processing evaluation results" resultCount=281 + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.29217437Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.473618ms + logger=ngalert.scheduler user=260796 slug=expressvpn version=55 fingerprint=96708b288e8bcaf6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.27302077Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc037ab7560} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc037ab7900} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn Value:0xc037ab7990} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn Value:0xc037ab7a60} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc037ab76e0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn Value:0xc037ab7778} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn Value:0xc037ab73a8} H:{Var:H Labels:__name__=city:max_users_enabled, 
access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc037ab7620} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc037ab74a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.23673459s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn} value=48.90438247011952 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn} value=48.90438247011952 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc037ab7e40} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc049c88050} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn Value:0xc037ab7ef0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn Value:0xc049c88818} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc037ab7d38} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn Value:0xc049c883e0} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn Value:0xc037ab7be0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc049c885c8} I:{Var:I 
Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc037ab7c48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236782804s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=12500 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=12500 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn} value=26.298986351664137 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn} value=26.298986351664137 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=1 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=2 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02517a030} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02517a0c0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn Value:0xc049c89378} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn Value:0xc049c89750} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc049c89b10} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn Value:0xc049c89508} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn Value:0xc049c88dd0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc049c89110} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam 
Value:0xc049c89e10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236811151s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn} value=50.13333333333333 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn} value=50.13333333333333 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02517a430} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02517a1f0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn Value:0xc02517a5b0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn Value:0xc02517a300} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02517a288} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn Value:0xc02517a4a8} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn Value:0xc02517a528} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02517a398} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02517a650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236857597s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, 
country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=800 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=800 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn} value=18.495684340320594 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn} value=18.495684340320594 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc02517ab50} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc02517a978} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn Value:0xc02517abd0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn Value:0xc02517a750} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc02517aa08} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn Value:0xc02517a7c0} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn Value:0xc02517aaa0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc02517a850} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc02517a8e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236881496s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=1500 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, 
service_name=zam} value=1500 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn} value=37.03703703703704 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn} value=37.03703703703704 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02517b1c0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02517add0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn Value:0xc02517b238} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn Value:0xc02517af00} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02517b0e8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn Value:0xc02517ae60} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn Value:0xc02517ad28} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02517af78} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02517b050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236911766s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=5600 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=5600 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn} 
value=39.586673792980584 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn} value=39.586673792980584 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc02517b638} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc02517b6c8} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn Value:0xc02517b350} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn Value:0xc02517b7a0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc02517b858} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn Value:0xc02517b460} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn Value:0xc02517b3e0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc02517b500} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc02517b5a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236934223s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn} value=25.849731663685148 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, 
environment=production, role=vpn} value=25.849731663685148 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc02517b9b8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc02517ba58} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn Value:0xc02517bca8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn Value:0xc02517be10} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc02517bae8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn Value:0xc02517bd18} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn Value:0xc02517bd90} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc02517bb88} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc02517bc38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236969594s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn} value=106.4888888888889 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn} value=106.4888888888889 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, 
environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn} value=1 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc01aa8e038} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc01aa8e150} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn Value:0xc01aa8e1c0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn Value:0xc01aa8e2c8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc01aa8e260} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn Value:0xc02517bf10} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn Value:0xc01aa8e0a8} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc01aa8e358} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc02517bf98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237001167s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=3200 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=3200 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn} value=53.59848484848485 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn} value=53.59848484848485 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn} value=0 ], [ var='G' 
labels={access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc01aa8e860} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc01aa8e8f0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn Value:0xc01aa8e468} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn Value:0xc01aa8e4d8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc01aa8e680} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn Value:0xc01aa8e558} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn Value:0xc01aa8e5d0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc01aa8ea10} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc01aa8e7d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237025329s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn} value=32.08888888888889 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn} value=32.08888888888889 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, 
[Grafana alert-rule evaluation log, continued.] Every `{Instance:...}` record in this stretch carries the shared labels `__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, environment=production, role=vpn, service_name=zam` plus a per-record `city`/`country` pair, reports `State:Normal` with an empty `Error`, and has `EvaluatedAt:2024-05-29 13:44:10 +0000 UTC` with an `EvaluationDuration` of roughly 5.237 s. The preceding Belgrade, Serbia record closes with `H` = 1 and `I` = 0. The remaining records reduce to the per-instance values below; `B` always repeats `A`, `D` always repeats `C`, and `C`/`D` are rounded here to two decimal places:

| City | Country | A / B | C / D (%) | E | F | G | H | I |
|---|---|---|---|---|---|---|---|---|
| Berkshire | United Kingdom | 4500 | 38.39 | 0 | 0 | 1 | 1 | 0 |
| Berlin | Germany | 9480 | 52.52 | 1 | 0 | 1 | 2 | 0 |
| Bogota | Colombia | 1720 | 30.59 | 0 | 0 | 1 | 1 | 0 |
| Bratislava | Slovakia | 4000 | 19.43 | 0 | 0 | 1 | 1 | 0 |
| Brisbane | Australia | 1120 | 52.05 | 0 | 0 | 1 | 1 | 0 |
| Brussels | Belgium | 13200 | 25.70 | 1 | 0 | 1 | 2 | 0 |
| Bucharest | Romania | 5040 | 26.10 | 0 | 0 | 1 | 1 | 0 |
| Budapest | Hungary | 800 | 41.92 | 0 | 0 | 1 | 1 | 0 |
| Buenos Aires | Argentina | 1800 | 38.99 | 0 | 0 | 1 | 1 | 0 |
| Cairo | Egypt | 700 | 34.83 | 0 | 0 | 1 | 1 | 0 |
| Caracas | Venezuela | 280 | 53.57 | 0 | 0 | 1 | 1 | 0 |
| Chicago | United States | 3000 | 30.70 | 0 | 0 | 1 | 1 | 0 |
| Chisinau | Moldova | 560 | 27.78 | 0 | 0 | 1 | 1 | 0 |
| Colombo | Sri Lanka | 1520 | 14.12 | 0 | 0 | 1 | 1 | 0 |
| Copenhagen | Denmark | 2240 | 45.58 | 0 | 0 | 1 | 1 | 0 |
| Dallas | United States | 3000 | 40.87 | 0 | 0 | 1 | 1 | 0 |
| Dhaka | Bangladesh | 2240 | 9.97 | 0 | 0 | 1 | 1 | 0 |
| Doha | Qatar | 280 | 44.64 | 0 | 0 | 1 | 1 | 0 |
| Douglas | Isle of Man | 560 | 44.13 | 0 | 0 | 1 | 1 | 0 |
| Dubai | United Arab Emirates | 400 | 65.66 | 0 | 0 | 1 | 1 | 0 |
| Dublin | Ireland | 4520 | 19.93 | 0 | 0 | 1 | 1 | 0 |
{Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb3858} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb34c8} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn Value:0xc01cbb3540} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn Value:0xc01cbb3aa8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb3b38} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn Value:0xc01cbb3a30} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn Value:0xc01cbb3668} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb3700} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb3790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237560921s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=350 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=350 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn} value=62.93103448275862 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn} value=62.93103448275862 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dusseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam State:Normal 
Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb3c68} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02b14a130} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn Value:0xc01cbb3ce0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn Value:0xc02b14a1f8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb3e88} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn Value:0xc01cbb3f08} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn Value:0xc01cbb3d60} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02b14a060} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc01cbb3df8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.23758026s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=350 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=350 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn} value=66.95402298850574 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn} value=66.95402298850574 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Düsseldorf, country=Germany, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, 
role=vpn, service_name=zam Value:0xc02b14a7c0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02b14a980} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn Value:0xc02b14a388} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn Value:0xc02b14a838} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02b14a538} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn Value:0xc02b14a600} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn Value:0xc02b14a688} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02b14a458} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02b14ab20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237599565s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam} value=35120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam} value=35120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn} value=53.085260732269354 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn} value=53.085260732269354 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam} value=1 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam} value=2 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam Value:0xc02b14ad60} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, 
service_name=zam Value:0xc02b14ae30} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn Value:0xc02b14aee8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn Value:0xc02b14b238} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam Value:0xc02b14afc8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn Value:0xc02b14b2f0} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn Value:0xc02b14ac98} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam Value:0xc02b14b0c8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam Value:0xc02b14b180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237622953s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam} value=2200 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam} value=2200 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn} value=18.65942028985507 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn} value=18.65942028985507 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam Value:0xc02b14b490} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam Value:0xc02b14b7f0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn Value:0xc02b14bb00} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, 
environment=production, role=vpn Value:0xc02b14ba38} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam Value:0xc02b14b570} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn Value:0xc02b14b638} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn Value:0xc02b14b888} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam Value:0xc02b14b710} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam Value:0xc02b14b9b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237640918s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn} value=27.906976744186046 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn} value=27.906976744186046 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam Value:0xc01daf80e8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam Value:0xc01daf8228} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn Value:0xc02b14bed0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn Value:0xc02b14be10} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, 
role=vpn, service_name=zam Value:0xc01daf8190} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn Value:0xc02b14bf78} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn Value:0xc02b14bc90} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam Value:0xc01daf8040} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam Value:0xc02b14bd50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237657538s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam} value=2520 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam} value=2520 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn} value=16.36506687647522 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn} value=16.36506687647522 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam Value:0xc01daf8370} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam Value:0xc01daf8420} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn Value:0xc01daf84b0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn Value:0xc01daf86f8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam Value:0xc01daf87d8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, 
environment=production, role=vpn Value:0xc01daf8858} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn Value:0xc01daf8530} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam Value:0xc01daf85d0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam Value:0xc01daf86a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237679461s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam} value=6000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam} value=6000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn} value=55.588723051409616 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn} value=55.588723051409616 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Huenenberg, country=Switzerland, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam Value:0xc01daf8cb8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam Value:0xc01daf8d60} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn Value:0xc01daf8968} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn Value:0xc01daf89e8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam Value:0xc01daf8e00} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn Value:0xc01daf8af8} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, 
environment=production, role=vpn Value:0xc01daf8c28} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam Value:0xc01daf8b98} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam Value:0xc01daf8a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237699938s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn} value=71.02897102897103 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn} value=71.02897102897103 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam Value:0xc01daf8f40} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam Value:0xc01daf9238} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn Value:0xc01daf92a8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn Value:0xc01daf9318} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam Value:0xc01daf8fd0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn Value:0xc01daf9388} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn Value:0xc01daf9048} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, 
service_name=zam Value:0xc01daf9428} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam Value:0xc01daf90f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.23771767s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=2360 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=2360 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn} value=145.76988155668357 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn} value=145.76988155668357 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn} value=1 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc01daf9670} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc01daf99b0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn Value:0xc01daf9780} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn Value:0xc01daf9920} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc01daf9710} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn Value:0xc01daf9808} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn Value:0xc01daf9538} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc01daf98b0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam 
Value:0xc01daf95e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237729756s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn} value=17.032720753025547 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn} value=17.032720753025547 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc01daf9d40} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc01daf9df0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn Value:0xc01daf9b90} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn Value:0xc01daf9be8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc01daf9ac8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn Value:0xc01daf9ca8} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn Value:0xc01daf9e70} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc01daf9f20} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc01daf9fd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237746925s EvaluationString:[ var='A' 
labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=2000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=2000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn} value=21.289355322338828 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn} value=21.289355322338828 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc0086e4158} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc0086e4368} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn Value:0xc0086e4500} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn Value:0xc0086e42d8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc0086e41e8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn Value:0xc0086e4260} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn Value:0xc0086e40c8} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc0086e43f8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc0086e4488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237774925s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, 
role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn} value=45.45454545454545 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn} value=45.45454545454545 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc0086e4c08} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc0086e4760} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn Value:0xc0086e4630} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn Value:0xc0086e48b0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc0086e4940} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn Value:0xc0086e4cc8} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn Value:0xc0086e4d70} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc0086e46c0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc0086e49d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237798487s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=1780 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, 
country=Malaysia, environment=production, role=vpn, service_name=zam} value=1780 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn} value=26.55367231638418 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn} value=26.55367231638418 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc0086e52c0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc0086e4f80} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn Value:0xc0086e5190} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn Value:0xc0086e5210} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc0086e5010} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn Value:0xc0086e4e68} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn Value:0xc0086e5088} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc0086e5110} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc0086e4ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.237816441s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn} 
Alert instances for `__name__=city:max_users_enabled`, evaluated at 2024-05-29 13:44:10 +0000 UTC. Every instance is in state `Normal` with no error, shares the labels `access_group=premium`, `brand=CyberGhost`, `environment=production`, `role=vpn`, `service_name=zam`, and took roughly 5.238 s to evaluate; only `city` and `country` vary. Variables `A` and `B` carry the `city:max_users_enabled` value, `C` and `D` carry an identical pair of derived values, and `E` through `I` are numeric condition outputs. Values cut off at the edges of this excerpt are shown as `…`.

| City | Country | A / B | C / D | E | F | G | H | I |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Lagos | Nigeria | … | 75.17482517482517 | 0 | 0 | 1 | 1 | 0 |
| Las Vegas | United States | 2240 | 46.0573476702509 | 0 | 0 | 1 | 1 | 0 |
| Lisbon | Portugal | 2240 | 36.93333333333334 | 0 | 0 | 1 | 1 | 0 |
| Ljubljana | Slovenia | 280 | 50.36231884057971 | 0 | 0 | 1 | 1 | 0 |
| London | United Kingdom | 14800 | 37.132152588555854 | 1 | 0 | 1 | 2 | 0 |
| Los Angeles | United States | 6720 | 43.235955056179776 | 0 | 0 | 1 | 1 | 0 |
| Luxembourg | Luxembourg | 2000 | 23.622828784119108 | 0 | 0 | 1 | 1 | 0 |
| Macau | Macao | 400 | 36.2962962962963 | 0 | 0 | 1 | 1 | 0 |
| Madrid | Spain | 3360 | 39.78015448603684 | 0 | 0 | 1 | 1 | 0 |
| Manchester | United Kingdom | 7700 | 38.71221044365921 | 0 | 0 | 1 | 1 | 0 |
| Manila | Philippines | 1920 | 25.76396206533193 | 0 | 0 | 1 | 1 | 0 |
| Marseille | France | 4400 | 41.23641304347826 | 0 | 0 | 1 | 1 | 0 |
| Mexico City | Mexico | 1800 | 32.128960533629794 | 0 | 0 | 1 | 1 | 0 |
| Miami | United States | 5300 | 56.60697455230914 | 0 | 0 | 1 | 1 | 0 |
| Milano | Italy | 2000 | 35.18796992481203 | 0 | 0 | 1 | 1 | 0 |
| Minsk | Belarus | 1500 | 28.466175485599464 | 0 | 0 | 1 | 1 | 0 |
| Monaco | Monaco | 560 | 42.805755395683455 | 0 | 0 | 1 | 1 | 0 |
| Montreal | Canada | 11700 | 32.16645216645217 | 1 | 0 | 1 | 2 | 0 |
| Moscow | Russian Federation | 4000 | 45.05136557253821 | 0 | 0 | 1 | 1 | 0 |
| Mumbai | India | 2000 | 27.164179104477608 | 0 | 0 | 1 | 1 | 0 |
| Nairobi | Kenya | 280 | 30 | 0 | 0 | 1 | 1 | 0 |
| Nassau | Bahamas | 1200 | 43.09623430962343 | 0 | 0 | 1 | 1 | 0 |
| New York | United States | 11240 | 51.65792514890213 | 1 | 0 | 1 | … | … |
], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=New York, country=United States, environment=production, role=vpn, service_name=zam} value=2 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=New York, country=United States, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam Value:0xc013ee06f0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam Value:0xc013ee07c0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn Value:0xc013ee08c0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn Value:0xc013ee0840} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam Value:0xc013ee0990} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn Value:0xc013ee0ae0} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn Value:0xc013ee0380} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam Value:0xc013ee05c0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam Value:0xc013ee0250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.23840317s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam} value=400 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam} value=400 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn} value=45.566502463054185 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn} value=45.566502463054185 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, 
access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc013ee1180} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc013ee12b8} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn Value:0xc013ee1210} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn Value:0xc013ee0d90} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc013ee0ee8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn Value:0xc013ee1498} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn Value:0xc013ee0f68} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc013ee1560} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc013ee0c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238424068s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn} value=54.60526315789473 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn} value=54.60526315789473 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, country=Romania, 
environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc013ee16c8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc013ee1a10} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn Value:0xc013ee1828} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn Value:0xc013ee1c18} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc013ee1b80} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn Value:0xc013ee1760} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn Value:0xc013ee1e50} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc028aca268} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc013ee18f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238452988s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn} value=38.94736842105263 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn} value=38.94736842105263 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc08eb26630} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc08eb26550} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn Value:0xc08eb267f8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn Value:0xc08eb26878} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc028acb1e8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn Value:0xc08eb26258} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn Value:0xc028acaea0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc028acb778} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc028acba40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.23848387s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=800 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=800 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn} value=72.97979797979798 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn} value=72.97979797979798 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc08eb26e98} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama 
City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc08eb27140} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn Value:0xc08eb276a0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn Value:0xc08eb27370} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc08eb27a98} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn Value:0xc08eb274b0} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn Value:0xc08eb27d88} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc08eb26c28} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc08eb26d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.23850625s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn} value=40.35087719298245 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn} value=40.35087719298245 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam Value:0xc015a8c050} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam Value:0xc015a8c510} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, 
role=vpn Value:0xc015a8c280} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn Value:0xc05f918b98} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam Value:0xc015a8c598} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn Value:0xc015a8c388} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn Value:0xc015a8c3f8} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam Value:0xc015a8c310} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam Value:0xc015a8c488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238520305s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam} value=49940 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam} value=49940 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn} value=37.832261682991806 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn} value=37.832261682991806 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam} value=1 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam} value=2 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam Value:0xc015a8c860} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam Value:0xc015a8ca70} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn Value:0xc015a8c7d0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn Value:0xc015a8cb00} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, 
city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam Value:0xc015a8c6d0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn Value:0xc015a8cc10} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn Value:0xc015a8c750} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam Value:0xc015a8c8f0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam Value:0xc015a8c990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238539036s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam} value=560 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam} value=560 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn} value=50.45372050816697 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn} value=50.45372050816697 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam Value:0xc015a8d1d8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam Value:0xc015a8d628} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn Value:0xc015a8d2f8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn Value:0xc015a8d500} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam Value:0xc015a8cf78} F:{Var:F Labels:access_group=premium, 
brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn Value:0xc015a8ce70} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn Value:0xc015a8d700} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam Value:0xc015a8d0f0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam Value:0xc015a8d440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238559761s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam} value=1250 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam} value=1250 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn} value=44.230769230769226 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn} value=44.230769230769226 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam Value:0xc01c18ff90} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam Value:0xc015a8dce0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn Value:0xc015a8d8c8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn Value:0xc015a8de40} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam Value:0xc0133b48a0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn Value:0xc015a8d9e0} G:{Var:G 
Labels:access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn Value:0xc015a8df10} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam Value:0xc015a8dc40} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam Value:0xc015a8da98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238589038s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn} value=32.631578947368425 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn} value=32.631578947368425 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Podgorica, country=Montenegro, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam Value:0xc00630c0a0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam Value:0xc00630c6f8} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn Value:0xc00630c160} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn Value:0xc00630c518} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam Value:0xc00630c350} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn Value:0xc0133b51d8} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn Value:0xc0133b59b8} 
H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam Value:0xc00630ca10} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam Value:0xc00630c610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238615643s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn} value=43.722172751558325 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn} value=43.722172751558325 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn} value=0 ], [ var='G' labels={access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam Value:0xc00630d220} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam Value:0xc00630d2e0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn Value:0xc00630ccb8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn Value:0xc00630cb78} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam Value:0xc00630cf98} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn Value:0xc00630cd80} G:{Var:G Labels:access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn Value:0xc00630d0a0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam 
Value:0xc00630d138} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam Value:0xc00630cc60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.238644356s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam} value=680 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam} value=680 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn} value=41.348973607038126 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn} value=41.348973607038126 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Rabat, country=Morocco, environment=production, role=vpn} value=0 ], [ var='G' labels={acces + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-card-quest, pod_name=s-card-quest-5483-2" t=2024-05-29T13:44:15.292099138Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.292047969Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.153558ms + level=info ts=2024-05-29T13:44:15.29198266Z caller=remote_alert_sender.go:94 user=555280 slug=hipcreative host=hipcreative-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.5.119:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddby61rboa5tsf alerts=1 + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-card-quest, pod_name=s-card-quest-5483-1" t=2024-05-29T13:44:15.291921455Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-auto-sort, pod_name=s-auto-sort-1ce0-2" t=2024-05-29T13:44:15.29173114Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv instance= t=2024-05-29T13:44:15.291663918Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance= t=2024-05-29T13:44:15.291652327Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.291624868Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=pc-lion-aws, namespace=s-auto-sort, pod_name=s-auto-sort-1ce0-1" t=2024-05-29T13:44:15.291542747Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.290863144Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-vegascherryslots, pod_name=s-vegascherryslots-a3da-2" t=2024-05-29T13:44:15.291367235Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-trap-traversal-prod1, pod_name=s-trap-traversal-prod1-ccf2-1" t=2024-05-29T13:44:15.290994832Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.29102226Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.291026666Z caller=remote_instance_store.go:51 user=901230 slug=integromonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:15.29097983Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:15.290973894Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=901230 slug=integromonitor t=2024-05-29T13:44:15.290942853Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.29090801Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.290820915Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-tower-liveops-dev, pod_name=s-tower-liveops-dev-19be-1" t=2024-05-29T13:44:15.290819935Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=outsystems, dbinstance_identifier=lon-dev-frontend-db, env=dev" t=2024-05-29T13:44:15.290812158Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.290609975Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.290556025Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="LoadBalancer=app/k8s-techtpsmarex-08051a06b1/e77b2063032268f9, TargetGroup=targetgroup/k8s-techtps-tpswebin-2bd4c14af1/54b60e82e75161fa" t=2024-05-29T13:44:15.29052365Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.290474257Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=27737 slug=edfmancapital version=4 fingerprint=30a0090a4a848991 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.290384079Z level=debug msg="Alert rule evaluated" results="[{Instance:LoadBalancer=app/k8s-techtpsmarex-08051a06b1/e77b2063032268f9, TargetGroup=targetgroup/k8s-techtps-tpswebin-2bd4c14af1/54b60e82e75161fa State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:LoadBalancer=app/k8s-techtpsmarex-08051a06b1/e77b2063032268f9, TargetGroup=targetgroup/k8s-techtps-tpswebin-2bd4c14af1/54b60e82e75161fa Value:0xc01f311ab0} C:{Var:C Labels:LoadBalancer=app/k8s-techtpsmarex-08051a06b1/e77b2063032268f9, TargetGroup=targetgroup/k8s-techtps-tpswebin-2bd4c14af1/54b60e82e75161fa Value:0xc01f311ab8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.290067421s EvaluationString:[ var='B' labels={LoadBalancer=app/k8s-techtpsmarex-08051a06b1/e77b2063032268f9, 
TargetGroup=targetgroup/k8s-techtps-tpswebin-2bd4c14af1/54b60e82e75161fa} value=0 ], [ var='C' labels={LoadBalancer=app/k8s-techtpsmarex-08051a06b1/e77b2063032268f9, TargetGroup=targetgroup/k8s-techtps-tpswebin-2bd4c14af1/54b60e82e75161fa} value=0 ]}]" duration=58.410857ms + level=debug ts=2024-05-29T13:44:15.290384743Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.290305942Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.290320531Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-tof-dev, pod_name=s-tof-dev-8d15-1" t=2024-05-29T13:44:15.29021203Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.290153482Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=car-risk, pod=car-risk-544589c84b-qlb5g" t=2024-05-29T13:44:15.290207971Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.290136544Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=630397 slug=tatin t=2024-05-29T13:44:15.290127567Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.505152ms + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=car-risk, pod=car-risk-544589c84b-6p8b4" t=2024-05-29T13:44:15.289873513Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.289871202Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.289766973Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=car-risk, pod=car-risk-544589c84b-6p8b4" t=2024-05-29T13:44:15.28977464Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.289761764Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-submarine-jump, pod_name=s-submarine-jump-f18c-1" t=2024-05-29T13:44:15.28965736Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-submarine-jump, pod_name=s-submarine-jump-f18c-1" t=2024-05-29T13:44:15.289646722Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-shuffle-master, pod_name=s-shuffle-master-5645-2" t=2024-05-29T13:44:15.289535625Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-shuffle-master, pod_name=s-shuffle-master-5645-1" t=2024-05-29T13:44:15.289392467Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=outruntime, 
dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:15.289245484Z level=debug msg="Setting next state" handler=resultNormal
[testdata hunk continues: several hundred additional "+" lines of Grafana Alerting (ngalert) logfmt entries of the same shape — scheduler rule evaluations, state-manager transitions ("Setting next state" / "Keeping state" / "Changing state"), state persistence ("Saving alert states" / "calling SaveAlertInstance"), reserved-label warnings, template-expansion errors, and alert delivery. Representative entries:]
+ logger=ngalert.scheduler user=87780 slug=zencloudandhosting version=1 fingerprint=a1723b856aeac5ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.288691813Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.28846747s EvaluationString:}]" duration=105.813883ms
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting t=2024-05-29T13:44:15.288759659Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=485459 slug=heroiclabs instance="mc_zone=hc-prod-gcp-us-east1-b, namespace=s-sandbox, pod_name=s-sandbox-d671-1" t=2024-05-29T13:44:15.289227482Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=752743 slug=andreydmitr20 instance= t=2024-05-29T13:44:15.284623596Z level=debug msg="Changing state" previous_state=Normal next_state=Alerting previous_ends_at=2024-05-29T13:39:10Z next_ends_at=2024-05-29T14:04:10Z
+ level=debug ts=2024-05-29T13:44:15.284703888Z caller=remote_image_capturer.go:54 user=752743 slug=andreydmitr20 rule_org_id=1 rule_uid=bdfmqcdayla0wa dashboard=integration-docker-logs panel=5 msg="rendering alert image with grafana"
+ level=debug ts=2024-05-29T13:44:15.289080881Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=174927 slug=syndic82690 t=2024-05-29T13:44:15.284032441Z level=debug msg="Saving alert states done" count=12 max_state_save_concurrency=1 duration=402.266196ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.282678051Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:15.285033819Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=QA - Warning - SSL expiration"
+ logger=ngalert.state.manager user=155740 slug=routific t=2024-05-29T13:44:15.281656586Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Map Service instance ${{labels.instance}} facing slowness in request processing.': error parsing template __alert_Request Duration per instance 95th percentile alert - Map Service - production: template: __alert_Request Duration per instance 95th percentile alert - Map Service - production:1: function \"labels\" not defined"
+ level=info ts=2024-05-29T13:44:15.274911384Z caller=remote_alert_sender.go:94 user=244232 slug=uvadashboard host=uvadashboard-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.107.179:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c01de1bc-e465-4e44-8495-163d9aa01975 alerts=1
[… remaining entries of this hunk elided …]
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.274877137Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.274863173Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.274812741Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.274699805Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.274612287Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.274587541Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.274543957Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.27418002Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.274125335Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riyadh, country=Saudi Arabia, datacenter=M247, environment=production, instance=95.181.235.145:9998, ip=95.181.235.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/saudiarabia.crt, role=vpn, server=saudiarabia404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.27405139Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.274029808Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.98:9998, ip=196.196.53.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=riga406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.27393028Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.273906343Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.273898839Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=741823 slug=sudoops t=2024-05-29T13:44:15.27383326Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.798819ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.98:9998, ip=196.196.53.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lv.crt, role=vpn, server=riga406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.273755805Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.273747342Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.273632331Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.82:9998, ip=196.196.53.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=riga405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.273587223Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.273531309Z caller=remote_instance_store.go:51 user=363785 slug=moonletmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.273516833Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=363785 slug=moonletmonitor instance= t=2024-05-29T13:44:15.273458358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=363785 slug=moonletmonitor instance= t=2024-05-29T13:44:15.27344666Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=363785 slug=moonletmonitor version=105 fingerprint=a0f414459e1f7d8b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.273285477Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.272908991s EvaluationString:}]" duration=41.580634ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.82:9998, ip=196.196.53.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lv.crt, role=vpn, server=riga405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.273390099Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:15.273099742Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=297992 slug=lemoncxvizz t=2024-05-29T13:44:15.273041571Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.383736ms + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.50:9998, ip=196.196.53.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lv.crt, role=vpn, server=riga407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.27302128Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.272772795Z caller=remote_instance_store.go:51 user=371085 slug=comexport msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.272744863Z caller=remote_image_capturer.go:33 user=408157 slug=ipcsmanagedaccountspov rule_org_id=1 rule_uid=eae46a93-5e13-4c26-a512-0715212a8b10 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.2:9998, ip=196.196.53.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lv.crt, role=vpn, server=riga404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.272733422Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:15.272675066Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=44.867289ms + logger=ngalert.state.manager user=408157 slug=ipcsmanagedaccountspov instance="__name__=forecastdokutest:anomalous, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=a7b62781-3eaa-4cec-aca2-2ea08f3f14c5, ml_job_metric=forecastdokutest" t=2024-05-29T13:44:15.272716762Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:15.272670184Z caller=remote_instance_store.go:51 user=338059 slug=ninetailed msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=326888 slug=buildingblocks t=2024-05-29T13:44:15.272480779Z level=debug msg="Saving alert states done" count=8 max_state_save_concurrency=1 duration=147.555031ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.130:9998, ip=196.196.53.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=riga408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.272561399Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.272514201Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.27246286Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.130:9998, ip=196.196.53.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lv.crt, role=vpn, server=riga408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.272427008Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=196.196.53.130:9998, ip=196.196.53.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lv.crt, role=vpn, server=riga408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.272413251Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-risk-defense-platform-db, env=uk" t=2024-05-29T13:44:15.27235034Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.272308317Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-risk-defense-platform-db, env=uk" t=2024-05-29T13:44:15.272331244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=299871 slug=connectia t=2024-05-29T13:44:15.272160425Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=140107 slug=jqiannian t=2024-05-29T13:44:15.272096428Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Reykjavik, country=Iceland, datacenter=Estnoc, environment=production, instance=45.133.193.34:9998, ip=45.133.193.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/is.crt, role=vpn, server=reykjavik401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.272086232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=299871 slug=connectia version=1 fingerprint=1370a741224aff48 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.271833745Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0408a5050} B:{Var:B Labels: Value:0xc0408a5058} C:{Var:C Labels: Value:0xc0408a5060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.27138703s EvaluationString:[ var='A' labels={} value=1 ], [ var='B' labels={} value=1 ], [ var='C' labels={} value=0 ]}]" duration=10.902932ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.2720384Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=346766 slug=checklyhq t=2024-05-29T13:44:15.271903001Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.271682946Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.271588304Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.271518358Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.27148589Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=384749 slug=airpointnow t=2024-05-29T13:44:15.271428487Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.271400528Z caller=remote_image_capturer.go:33 user=384749 slug=airpointnow rule_org_id=1 rule_uid=R9-J48WVk msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=384749 slug=airpointnow instance="datasource_uid=Z_mYswCnk, ref_id=A" t=2024-05-29T13:44:15.271380487Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.271261969Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.27125549Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=178647 slug=ocadoerthgcp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.271176319Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=178647 slug=ocadoerthgcp t=2024-05-29T13:44:15.271101071Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Quito, country=Ecuador, datacenter=GSL, environment=production, instance=84.247.93.2:9998, ip=84.247.93.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ec-ecuador-pf.crt, role=vpn, server=ecuador401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.270947294Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Quito, country=Ecuador, datacenter=GSL, environment=production, instance=84.247.93.2:9998, ip=84.247.93.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ec-ecuador-pf.crt, role=vpn, server=ecuador401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.270932919Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.270902943Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Providence, country=United States, datacenter=DataPacket, environment=production, instance=84.239.45.2:9998, ip=84.239.45.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-rhode-island-pf.crt, role=vpn, server=rhodeisland402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.270761225Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.270740659Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Providence, country=United States, datacenter=DataPacket, environment=production, instance=84.239.45.2:9998, ip=84.239.45.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=rhodeisland402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.270627726Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.270587196Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.270584401Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.27030281Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Prague, country=Czech Republic, datacenter=DataPacket, environment=production, instance=212.102.39.65:9998, ip=212.102.39.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/czech.crt, role=vpn, server=prague403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.270267335Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.270254326Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.270182702Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.27015087Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.27007301Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Prague, country=Czech Republic, datacenter=DataPacket, environment=production, instance=212.102.39.130:9998, ip=212.102.39.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=prague402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.270091798Z level=debug msg="Keeping state" 
state=Normal + level=debug ts=2024-05-29T13:44:15.270044891Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Prague, country=Czech Republic, datacenter=DataPacket, environment=production, instance=212.102.39.130:9998, ip=212.102.39.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=prague402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.270084627Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.26990454Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269638526Z caller=remote_instance_store.go:51 user=830813 slug=lynx0den msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.26778636Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269550389Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=788474 slug=elisasre t=2024-05-29T13:44:15.269731238Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="component=docg-prod, monitor=monitor-437" + level=debug ts=2024-05-29T13:44:15.269726945Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269721734Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269576178Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:15.26963126Z level=debug msg="Saving alert states done" count=11 max_state_save_concurrency=1 duration=168.274603ms + level=debug ts=2024-05-29T13:44:15.269540389Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269582453Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Prague, country=Czech Republic, datacenter=DataPacket, environment=production, instance=212.102.38.212:9998, ip=212.102.38.212, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/czech.crt, role=vpn, server=prague401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.269601035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=830813 slug=lynx0den t=2024-05-29T13:44:15.269597956Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.269551934Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=811546 slug=fyld instance="ClusterName=sitestream-sme-aus-fyld-brain, ServiceName=sitestream-sme-aus-fyld-brain" t=2024-05-29T13:44:15.269494848Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.269551909Z 
caller=remote_instance_store.go:51 user=811546 slug=fyld msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=811546 slug=fyld instance="ClusterName=sitestream-sme-aus, ServiceName=sitestream-sme-aus" t=2024-05-29T13:44:15.269450287Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.269483032Z caller=remote_instance_store.go:51 user=452115 slug=ybmetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=811546 slug=fyld instance="ClusterName=sitestream-sme-aus, ServiceName=sitestream-debug-sme-aus" t=2024-05-29T13:44:15.269432146Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.269515189Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=811546 slug=fyld instance="ClusterName=sitestream-sme-aus, ServiceName=sitestream-debug-sme-aus" t=2024-05-29T13:44:15.269420536Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=811546 slug=fyld instance="ClusterName=sitestream-sme-aus, ServiceName=sitestream-celery-sme-aus" t=2024-05-29T13:44:15.269380515Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=811546 slug=fyld t=2024-05-29T13:44:15.269329014Z level=debug msg="State manager processing evaluation results" resultCount=4 + level=debug ts=2024-05-29T13:44:15.269439327Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=811546 slug=fyld version=1 fingerprint=3a49fc3ca02bbb96 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.26918668Z level=debug msg="Alert rule evaluated" results="[{Instance:ClusterName=sitestream-sme-aus, ServiceName=sitestream-celery-sme-aus State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:ClusterName=sitestream-sme-aus, ServiceName=sitestream-celery-sme-aus Value:0xc01c5997c0} MemoryUtilization:{Var:MemoryUtilization Labels:ClusterName=sitestream-sme-aus, ServiceName=sitestream-celery-sme-aus Value:0xc01c5997c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.268533436s EvaluationString:[ var='C' labels={ClusterName=sitestream-sme-aus, ServiceName=sitestream-celery-sme-aus} value=0 ], [ var='MemoryUtilization' labels={ClusterName=sitestream-sme-aus, ServiceName=sitestream-celery-sme-aus} value=41.22222222222222 ]} {Instance:ClusterName=sitestream-sme-aus, ServiceName=sitestream-debug-sme-aus State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:ClusterName=sitestream-sme-aus, ServiceName=sitestream-debug-sme-aus Value:0xc01c599818} MemoryUtilization:{Var:MemoryUtilization Labels:ClusterName=sitestream-sme-aus, ServiceName=sitestream-debug-sme-aus Value:0xc01c599860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.268552767s EvaluationString:[ var='C' labels={ClusterName=sitestream-sme-aus, ServiceName=sitestream-debug-sme-aus} value=0 ], [ var='MemoryUtilization' labels={ClusterName=sitestream-sme-aus, ServiceName=sitestream-debug-sme-aus} value=3.90625 ]} {Instance:ClusterName=sitestream-sme-aus, ServiceName=sitestream-sme-aus State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:ClusterName=sitestream-sme-aus, ServiceName=sitestream-sme-aus Value:0xc01c5998c8} MemoryUtilization:{Var:MemoryUtilization Labels:ClusterName=sitestream-sme-aus, ServiceName=sitestream-sme-aus Value:0xc01c5998c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.268560717s EvaluationString:[ var='C' labels={ClusterName=sitestream-sme-aus, ServiceName=sitestream-sme-aus} value=0 ], [ var='MemoryUtilization' labels={ClusterName=sitestream-sme-aus, ServiceName=sitestream-sme-aus} value=35.48518518518519 ]} {Instance:ClusterName=sitestream-sme-aus-fyld-brain, ServiceName=sitestream-sme-aus-fyld-brain State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:ClusterName=sitestream-sme-aus-fyld-brain, ServiceName=sitestream-sme-aus-fyld-brain Value:0xc01c599918} MemoryUtilization:{Var:MemoryUtilization Labels:ClusterName=sitestream-sme-aus-fyld-brain, ServiceName=sitestream-sme-aus-fyld-brain Value:0xc01c599960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.268571657s EvaluationString:[ var='C' labels={ClusterName=sitestream-sme-aus-fyld-brain, ServiceName=sitestream-sme-aus-fyld-brain} value=0 ], [ var='MemoryUtilization' labels={ClusterName=sitestream-sme-aus-fyld-brain, ServiceName=sitestream-sme-aus-fyld-brain} value=14.8284912109375 ]}]" duration=313.253044ms + level=debug ts=2024-05-29T13:44:15.269355915Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=qe-resending-notification-backfill" t=2024-05-29T13:44:15.26928589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=qe-parcel-ranges-ingest" t=2024-05-29T13:44:15.269262883Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-sales-contacts-consumer" t=2024-05-29T13:44:15.269244718Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-sales-contacts-consumer" t=2024-05-29T13:44:15.269235755Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.269168758Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269159692Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269198125Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.269148183Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-parcel-ranges-backfill" t=2024-05-29T13:44:15.269203818Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-customer-territory-backfill" t=2024-05-29T13:44:15.269131755Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-customer-addresses-backfill" t=2024-05-29T13:44:15.269103891Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.269057743Z caller=remote_instance_store.go:51 user=442934 slug=arqit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-courier-tours-backfill" t=2024-05-29T13:44:15.269068658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz 
instance="consumer_name=pcm-dx-workdays-backfill" t=2024-05-29T13:44:15.269032344Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Portland - Maine, country=United States, datacenter=DataPacket, environment=production, instance=84.239.37.2:9998, ip=84.239.37.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-maine-pf.crt, role=vpn, server=maine402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.269021611Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-tours-backfill" t=2024-05-29T13:44:15.268994944Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-stable-collection-template-backfill" t=2024-05-29T13:44:15.26896103Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.268953841Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-stable-collection-block-backfill" t=2024-05-29T13:44:15.268942894Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.268860579Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=163513 slug=dialpad t=2024-05-29T13:44:15.268818852Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.26877742Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-periods-backfill" t=2024-05-29T13:44:15.268863747Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-periods-backfill" t=2024-05-29T13:44:15.268852179Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-labels-backfill" t=2024-05-29T13:44:15.268818028Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=355252 slug=bumper t=2024-05-29T13:44:15.268769799Z level=debug msg="Saving alert states" count=14 max_state_save_concurrency=1 + logger=ngalert.state.manager user=408734 slug=mmrresearch instance= t=2024-05-29T13:44:15.268618149Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=444725 slug=devnextgen t=2024-05-29T13:44:15.268600564Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=408734 slug=mmrresearch instance= t=2024-05-29T13:44:15.268609572Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.268619278Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=strapi-74c7cc756b-lfvvv" t=2024-05-29T13:44:15.268581726Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-stable-collection-template" t=2024-05-29T13:44:15.268575132Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Podgorica, country=Montenegro, datacenter=M247, environment=production, instance=176.125.229.2:9998, ip=176.125.229.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/montenegro.crt, role=vpn, server=montenegro403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.268478446Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.268446896Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=strapi-74c7cc756b-lfvvv" t=2024-05-29T13:44:15.268410015Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-invoice" t=2024-05-29T13:44:15.268377429Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.268419675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.268409913Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=ff258718d1bf1071 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.268277225Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[A0:{Var:A Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.267732675s EvaluationString:[ var='A0' metric='NoData' labels={} value=null ]}]" duration=63.466367ms + logger=ngalert.state.manager.persist user=278024 slug=fibreking t=2024-05-29T13:44:15.268320321Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.200521ms + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-collections" t=2024-05-29T13:44:15.268339822Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-collection-labels" t=2024-05-29T13:44:15.268291284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz t=2024-05-29T13:44:15.268225873Z level=debug msg="State manager processing evaluation results" resultCount=27 + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-qa201-mse-standby-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.268196663Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vdb, 
env=nonprod-mse-database, host=pgsql-qa201-mse-standby-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.268182845Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.268143105Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.268052358Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Podgorica, country=Montenegro, datacenter=M247, environment=production, instance=176.125.229.16:9998, ip=176.125.229.16, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/montenegro.crt, role=vpn, server=montenegro404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.268014898Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Podgorica, country=Montenegro, datacenter=M247, environment=production, instance=176.125.229.16:9998, ip=176.125.229.16, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/montenegro.crt, role=vpn, server=montenegro404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.267999638Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=frontend-app-646474d85-hvf8d" t=2024-05-29T13:44:15.267977154Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-qa200-mse-standby-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.267993816Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-qa200-mse-primary-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.267900826Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.267845494Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.267827203Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:15.267805796Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=frontend-app-646474d85-dxc8c" t=2024-05-29T13:44:15.267803397Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-nonprod-turbine-standby-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.267807421Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=184.170.252.194:9998, ip=184.170.252.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us3.crt, role=vpn, server=phoenix411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.267758683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-nonprod-turbine-primary-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.267729549Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-nonprod-turbine-primary-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.267718629Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.267702113Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=frontend-app-646474d85-dxc8c" t=2024-05-29T13:44:15.267701393Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=184.170.252.194:9998, ip=184.170.252.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.267505001Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=saltmaster-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.267377987Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=184.170.252.130:9998, ip=184.170.252.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us3.crt, role=vpn, server=phoenix410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.267299184Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-qa201-mse-standby-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.267298999Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-qa201-mse-primary-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.267138536Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 
slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-qa200-mse-standby-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.267024445Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.266956523Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.266906999Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.266942868Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.266858482Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.266811611Z caller=remote_instance_store.go:51 user=158691 slug=covalent msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=158691 slug=covalent instance= t=2024-05-29T13:44:15.266754341Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-nonprod-turbine-primary-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.266769036Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=msdb, dbinstance_identifier=sing-prod-frontend-db, env=apac" t=2024-05-29T13:44:15.26675297Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.266660963Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=158691 slug=covalent version=3 fingerprint=8ccfd22711384d88 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.266605964Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.26632501s EvaluationString:}]" duration=15.055378ms + logger=ngalert.state.manager user=206107 slug=hydrolix instance= t=2024-05-29T13:44:15.266665028Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=backend-app-6779fd7d69-qq57r" t=2024-05-29T13:44:15.26659769Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=maint-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.266587806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=msdb, dbinstance_identifier=ohio-prod-frontend-db, env=us" t=2024-05-29T13:44:15.266514646Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=backup-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.266508101Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=cnco, device=/dev/vda1, env=nonprod-mse-database, host=backup-01.nonprod-mse-database.cnco.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.266495194Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.265988116Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.048581ms + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=backend-app-6779fd7d69-kttkr" t=2024-05-29T13:44:15.266425577Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.266449298Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=107179 slug=ibaudata version=1 fingerprint=b7d0933196e358a2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.266270495Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.266031474s EvaluationString:}]" duration=45.855627ms + logger=ngalert.state.manager.persist user=94289 slug=translinegruppegmbh t=2024-05-29T13:44:15.266329837Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=94289 slug=translinegruppegmbh instance= t=2024-05-29T13:44:15.26632119Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-qa-mse-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.266218188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, 
host=pgsql-qa-mse-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.266206744Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.266174611Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=msdb, dbinstance_identifier=lon-qa-frontend-db, env=qa" t=2024-05-29T13:44:15.263459673Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.266055077Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=backend-app-6779fd7d69-7hgp6" t=2024-05-29T13:44:15.266125072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-qa-mse-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.266094307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-nonprod-turbine-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.266016965Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.265978601Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=184.170.242.135:9998, ip=184.170.242.135, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.265866574Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-nonprod-turbine-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265930045Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=532655 slug=chathamdirectdev t=2024-05-29T13:44:15.26586735Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.745463ms + level=debug ts=2024-05-29T13:44:15.265715866Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-int-retool-dish-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265805824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=Ta6tIPbnz, ref_id=A" t=2024-05-29T13:44:15.265678292Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, 
host=pgsql-int-mse-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265722205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=482906 slug=wavelo t=2024-05-29T13:44:15.265659954Z level=debug msg="Saving alert states done" count=246 max_state_save_concurrency=1 duration=4.987202724s + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-int-mse-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265636951Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.265547037Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-retool-dish-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265535485Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-retool-dish-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265524076Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:15.26544659Z caller=remote_alert_sender.go:94 user=443907 slug=awarehq host=awarehq-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.85.104:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=d-ch79f4k alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=184.170.241.66:9998, ip=184.170.241.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.265439824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-retool-dish-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265435931Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.265386733Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-retool-combine-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265355086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-retool-combine-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265344184Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=443907 slug=awarehq t=2024-05-29T13:44:15.26517718Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.087843ms + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-retool-combine-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265247731Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.265094354Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-mse-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.265161326Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.265023807Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=320778 slug=omegaai instance="datasource_uid=k3-C8xH4z, ref_id=A" t=2024-05-29T13:44:15.265025887Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.264955405Z caller=remote_instance_store.go:51 user=477402 slug=infleqtion msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=805741 slug=signpostgroup t=2024-05-29T13:44:15.264848181Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-mse-database, host=pgsql-dev-billing-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.264896326Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=320778 slug=omegaai version=1 fingerprint=b69ebe6f606e2b38 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.264179302Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=k3-C8xH4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.260720742s EvaluationString:}]" duration=1.141483746s + logger=ngalert.state.manager user=78663 slug=foxhind instance= t=2024-05-29T13:44:15.264722872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.264663125Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, 
brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=184.170.241.2:9998, ip=184.170.241.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=phoenix406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.264682047Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=78663 slug=foxhind version=1 fingerprint=e7693eb32b8690b8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.264551291Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.264275135s EvaluationString:}]" duration=143.007417ms + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-dish-db-dwh, host=pgsql-dish-pte-mse-01.nonprod-dish-db-dwh.bra2.tucows.systems.nonprod-dish-db-dwh.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.264345064Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:15.264166303Z caller=grafana.go:247 user=389502 slug=ciscoiot msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=4 alerts=0 + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance="s3.key.keyword=202405280906-LS25/leaky/telemetry-ls25-prime-May-28-2024T18-46-06.json" t=2024-05-29T13:44:15.264028684Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vdb, env=nonprod-dish-db-dwh, host=pgsql-dish-dwh-mse-01.nonprod-dish-db-dwh.bra2.tucows.systems, mountpoint=/data" t=2024-05-29T13:44:15.264116485Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.264022725Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance="s3.key.keyword=202405280906-LS25/leaky/telemetry-ls25-prime-May-28-2024T15-51-39.json" t=2024-05-29T13:44:15.263975653Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance="s3.key.keyword=202405280906-LS25/leaky/telemetry-ls25-prime-May-28-2024T15-51-39.json" t=2024-05-29T13:44:15.263961413Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=syslog-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.264020874Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=syslog-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.264008839Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=saltmaster-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.26389805Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.263785713Z level=warn msg="Evaluation result contains 
either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=info ts=2024-05-29T13:44:15.263681875Z caller=grafana.go:247 user=389502 slug=ciscoiot msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=3 alerts=0 + level=debug ts=2024-05-29T13:44:15.263651336Z caller=remote_instance_store.go:51 user=489921 slug=statuscake msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:15.263620317Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-qa-mse-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.263695345Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.263642912Z caller=remote_instance_store.go:51 user=30534 slug=arsein msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.263659846Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:15.263610223Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=489921 slug=statuscake t=2024-05-29T13:44:15.263575248Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.263606988Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=815713 slug=returnstaging version=3 fingerprint=b9a47cae6f710655 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.263419888Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=timescale_read_only, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.263095992s EvaluationString:}]" duration=113.095803ms + logger=ngalert.state.manager user=447897 slug=mysten instance="datasource_uid=8Xt1pVoVk, ref_id=A" t=2024-05-29T13:44:15.263560546Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.263587538Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="datasource_uid=zxr_3eR4z, ref_id=A" t=2024-05-29T13:44:15.263557913Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="datasource_uid=zxr_3eR4z, ref_id=A" t=2024-05-29T13:44:15.263548152Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=447897 slug=mysten instance="datasource_uid=8Xt1pVoVk, ref_id=A" t=2024-05-29T13:44:15.263506112Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=435206 slug=kkrprivateuat t=2024-05-29T13:44:15.263533717Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.600184ms + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=b4cacddfc612d601 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.263506879Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.263295656s EvaluationString:}]" duration=366.480611ms + logger=ngalert.scheduler user=447897 slug=mysten version=11 fingerprint=d0a6df70394962ae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.263420101Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=8Xt1pVoVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.263115193s EvaluationString:}]" duration=109.120951ms + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-qa-mse-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.263424732Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=172.98.87.194:9998, ip=172.98.87.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.263379025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-nonprod-turbine-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.263304768Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.263211905Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.263114564Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-nonprod-turbine-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.263160571Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.263013521Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.262937284Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.263028114Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.262990519Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.262979018Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.262955926Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=741823 slug=sudoops version=36 fingerprint=f2a3bdf0f0f5d3ef attempt=1 
now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.262879538Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.262506586s EvaluationString:}]" duration=171.125392ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=172.98.87.194:9998, ip=172.98.87.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.262948037Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.262828772Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=395357 slug=sensen t=2024-05-29T13:44:15.262773576Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.197504ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.262832677Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.262798758Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.2627839Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=355252 slug=bumper instance="pod=backend-admin-7749884f44-qzhf4" t=2024-05-29T13:44:15.262766424Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:15.26274227Z caller=remote_alert_sender.go:94 user=27014 slug=baseline host=baseline-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.150.14:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=PpsH7oO7z alerts=1 + logger=ngalert.state.manager.persist user=27014 slug=baseline t=2024-05-29T13:44:15.262681343Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.662046ms + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-int-retool-dish-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.262689464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=452929 slug=rohitsharma6 instance= t=2024-05-29T13:44:15.262663017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=172.98.87.130:9998, ip=172.98.87.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us3.crt, role=vpn, server=phoenix409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.262608351Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.262568513Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.262473599Z 
caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=482906 slug=wavelo t=2024-05-29T13:44:15.262397533Z level=debug msg="Saving alert states" count=32 max_state_save_concurrency=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=msdb, dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:15.262380988Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.262278434Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-int-retool-dish-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.262270244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.262217833Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=msdb, dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:15.262217119Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.26205723Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.262162621Z caller=remote_instance_store.go:51 user=788474 slug=elisasre msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=172.98.87.130:9998, ip=172.98.87.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.262086492Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.262051729Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=788474 slug=elisasre t=2024-05-29T13:44:15.26209152Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=788474 slug=elisasre instance="__name__=probe_success, cluster=sre-ci.k8s.local, component=networkmgmt-prod, instance=https://10.222.158.40, monitor=monitor-433, namespace=health, region=sdcv3, target=https://10.222.158.40" t=2024-05-29T13:44:15.262075349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.262040491Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.262023691Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + 
logger=ngalert.state.manager user=788474 slug=elisasre instance="__name__=probe_success, cluster=sre-ci.k8s.local, component=networkmgmt-prod, instance=https://10.222.158.40, monitor=monitor-433, namespace=health, region=sdcv3, target=https://10.222.158.40" t=2024-05-29T13:44:15.262058629Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261913132Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.261961827Z caller=remote_instance_store.go:51 user=652809 slug=glassnode msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-int-mse-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.26201108Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261906592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=550672 slug=paceintegration instance= t=2024-05-29T13:44:15.261842Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=550672 slug=paceintegration instance= t=2024-05-29T13:44:15.261833268Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=550672 slug=paceintegration t=2024-05-29T13:44:15.261774165Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261886071Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261849111Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=107.181.184.199:9998, ip=107.181.184.199, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.261719717Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phoenix, country=United States, datacenter=TSS / Performive, environment=production, instance=107.181.184.199:9998, ip=107.181.184.199, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=phoenix412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.261704795Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.26177132Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482906 slug=wavelo 
instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261695816Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261679991Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-dev-retool-dish-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.261749177Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261645538Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261606765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261567716Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261482565Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261448242Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-dev-retool-combine-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.261449409Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:15.261415144Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.261334783Z caller=remote_instance_store.go:51 user=916139 slug=cmtdspd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.261309816Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:15.261390184Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261380542Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=206107 slug=hydrolix version=4 fingerprint=806aa04c78ab53d4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.261232214Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdicucphttr7ka, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.260951842s EvaluationString:}]" duration=58.873976ms + logger=ngalert.state.manager.persist user=695339 slug=twofiftysix 
t=2024-05-29T13:44:15.261183001Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=164.482632ms + level=debug ts=2024-05-29T13:44:15.261144086Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + Error parsing panelUID for alert annotationruleID1498dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=21110 slug=foxnextgames version=7 fingerprint=d6faf8fbfe3f7a41 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.261057355Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.260791949s EvaluationString:}]" duration=161.085856ms + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.261160188Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=556831 slug=adevintadash t=2024-05-29T13:44:15.261010394Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260966944Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260939485Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260845125Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260830755Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260815346Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260724415Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260700123Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.260441251Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-dev-billing-standby-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.260696884Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260633595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260595888Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260530771Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-dev-billing-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.260536662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-mse-database, host=pgsql-dev-billing-primary-01.nonprod-mse-database.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.260494592Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=482906 slug=wavelo instance="datasource_uid=ffa5b3dc-bc9e-4dd4-8c3f-89dc1f5b1bcd, ref_id=A" t=2024-05-29T13:44:15.260461294Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.260405338Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=482906 slug=wavelo t=2024-05-29T13:44:15.260253782Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=445941 slug=terawatt t=2024-05-29T13:44:15.260298513Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.650279ms + level=debug ts=2024-05-29T13:44:15.260155262Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.260053608Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.260086426Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.260038334Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=37.080633ms + level=debug ts=2024-05-29T13:44:15.259919364Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.260025664Z caller=manager.go:153 user=291766 slug=fita msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager.persist user=390300 slug=astrachain t=2024-05-29T13:44:15.259981614Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.061369ms + level=debug ts=2024-05-29T13:44:15.259950851Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.259938118Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phnom Penh, country=Cambodia, datacenter=M247, environment=production, instance=188.215.235.114:9998, ip=188.215.235.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, 
path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=cambodia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.259929381Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.259813947Z caller=manager.go:153 user=351452 slug=iotcoolana msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:15.259788176Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=456946 slug=menlosecurityredge t=2024-05-29T13:44:15.259722188Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=47.05595ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phnom Penh, country=Cambodia, datacenter=M247, environment=production, instance=188.215.235.114:9998, ip=188.215.235.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/cambodia.crt, role=vpn, server=cambodia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.259768794Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Phnom Penh, country=Cambodia, datacenter=M247, environment=production, instance=188.215.235.114:9998, ip=188.215.235.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/cambodia.crt, role=vpn, server=cambodia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.259754792Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.259627863Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.259606337Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-dish-db-dwh, host=pgsql-dish-dwh-turbine-01.nonprod-dish-db-dwh.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.259643575Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.259585171Z caller=remote_instance_store.go:51 user=415003 slug=salaryfinance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=415003 slug=salaryfinance instance="agent_hostname=PROD-SAL-VMH-WEB08, instance=PROD-SAL-VMH-WEB08:12345, job=integrations/windows_exporter" t=2024-05-29T13:44:15.259549239Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=482907 slug=wavelonp instance="datacenter=bra2, device=/dev/vda1, env=nonprod-dish-db-dwh, host=maint-01.nonprod-dish-db-dwh.bra2.tucows.systems, mountpoint=/" t=2024-05-29T13:44:15.259345952Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.259227482Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.259145312Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.259105294Z caller=remote_instance_store.go:51 user=326888 slug=buildingblocks msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.25911005Z caller=remote_instance_store.go:51 user=510494 slug=ellycode msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=510494 slug=ellycode t=2024-05-29T13:44:15.259077616Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=510494 slug=ellycode instance="datasource_uid=grafanacloud-usage, ref_id=A" t=2024-05-29T13:44:15.259054291Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=254242 slug=dats t=2024-05-29T13:44:15.25898482Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.259045693Z caller=remote_instance_store.go:51 user=254242 slug=dats msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.259045262Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=254242 slug=dats instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.258958613Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Philadelphia, country=United States, datacenter=DataPacket, environment=production, instance=84.239.41.129:9998, ip=84.239.41.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=pennsylvania403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.259013966Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Philadelphia, country=United States, datacenter=DataPacket, environment=production, instance=84.239.41.129:9998, ip=84.239.41.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=pennsylvania403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.259002507Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=254242 slug=dats instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.258925774Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=254242 slug=dats instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.258895688Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.258973105Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:15.258924341Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=syd-prod-frontend-db, env=au" t=2024-05-29T13:44:15.258785964Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.258620006Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.258548793Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.258491904Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.258477154Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.258465898Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Perth, country=Australia, datacenter=ServersAustralia, environment=production, instance=43.250.205.57:9998, ip=43.250.205.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=perth403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.258479405Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.258299697Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.258210996Z caller=manager.go:153 user=344576 slug=blairm msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.258250801Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Perth, country=Australia, datacenter=ServersAustralia, environment=production, instance=43.250.205.178:9998, ip=43.250.205.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=perth404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.258100978Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.258073284Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.258045531Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.25800439Z caller=manager.go:153 user=460952 slug=prdnextgen msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:15.257694804Z caller=remote_instance_store.go:51 user=297992 slug=lemoncxvizz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=sing-prod-frontend-db, env=apac" t=2024-05-29T13:44:15.257944693Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.257919561Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.257911621Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=538037 slug=drivewealth version=155 fingerprint=2b2bf0a7e20ccef2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.257810889Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.257524756s EvaluationString:}]" duration=2.17369478s + level=debug ts=2024-05-29T13:44:15.257799888Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.25770928Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=PIA, environment=production, instance=191.101.31.3:9998, ip=191.101.31.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=paris412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.257743257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=637816 slug=kingobservatory t=2024-05-29T13:44:15.257644018Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="datasource_uid=ee613482-6425-4196-93d8-43d0b8d057e7, ref_id=A" t=2024-05-29T13:44:15.257620579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.257693102Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.257700147Z caller=manager.go:153 user=443201 slug=hhcprod msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory t=2024-05-29T13:44:15.257570386Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=297992 slug=lemoncxvizz instance= t=2024-05-29T13:44:15.25763242Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.257537418Z caller=remote_instance_store.go:51 user=338059 slug=ninetailed msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=PIA, environment=production, instance=191.101.31.3:9998, ip=191.101.31.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/france.crt, role=streaming-optimized, server=paris412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.257530725Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:15.257455637Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=129.239054ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=PIA, environment=production, instance=191.101.31.2:9998, ip=191.101.31.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=paris411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.257361059Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:15.257304633Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.897742ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=PIA, environment=production, instance=191.101.31.2:9998, ip=191.101.31.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/france.crt, role=streaming-optimized, server=paris411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.25713924Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.257103241Z caller=remote_instance_store.go:51 user=633221 slug=chengtao msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.257077044Z caller=manager.go:153 user=305351 slug=dylspeaking msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=PIA, environment=production, instance=191.101.31.2:9998, ip=191.101.31.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/france.crt, role=streaming-optimized, server=paris411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.25710729Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.257105775Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=staging-vultr-tyo-us-z01" t=2024-05-29T13:44:15.257015817Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.256977093Z caller=remote_instance_store.go:51 user=244232 slug=uvadashboard msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.25672703Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=staging-linode-tyo-us-z02" t=2024-05-29T13:44:15.256878178Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=staging-linode-tyo-us-z02" t=2024-05-29T13:44:15.256866687Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=244232 slug=uvadashboard t=2024-05-29T13:44:15.256817529Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.256748698Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.256809605Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.256782576Z caller=manager.go:153 user=414734 slug=cubiko msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:15.256727127Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=staging-linode-sin-us-z01" t=2024-05-29T13:44:15.256743576Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.256691648Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.256626656Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.256585279Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.256620922Z caller=manager.go:153 user=656911 slug=liamgibs msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="FunctionName=CallInternalSchemaMigrationApiBeta" t=2024-05-29T13:44:15.25655356Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.256467463Z caller=manager.go:153 user=535905 slug=soficshifts msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-lax-gsl-01" t=2024-05-29T13:44:15.256543926Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-gsl-03" t=2024-05-29T13:44:15.256472269Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-gsl-03" t=2024-05-29T13:44:15.256462604Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.256318872Z caller=manager.go:153 user=535611 slug=lohanda msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=DataPacket, environment=production, instance=156.146.63.134:9998, ip=156.146.63.134, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/france.crt, role=streaming-optimized, server=paris402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.256348168Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-gsl-01" t=2024-05-29T13:44:15.256325127Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.256290791Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.256188119Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=DataPacket, environment=production, instance=156.146.63.129:9998, ip=156.146.63.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=paris414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.256100713Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.256043493Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=DataPacket, environment=production, instance=156.146.63.129:9998, ip=156.146.63.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=paris414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.256082295Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.255906221Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-comcast-07" t=2024-05-29T13:44:15.256003226Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Paris, country=France, datacenter=DataPacket, environment=production, instance=156.146.63.129:9998, ip=156.146.63.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/france.crt, role=streaming-optimized, server=paris414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.255880838Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.255869168Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-comcast-05" t=2024-05-29T13:44:15.255842589Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-comcast-04" t=2024-05-29T13:44:15.255789219Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.255706973Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-ewr-comcast-03" t=2024-05-29T13:44:15.255715876Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.255608435Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=lon-qa-frontend-db, env=qa" t=2024-05-29T13:44:15.255517093Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-dfw-latitude-03" t=2024-05-29T13:44:15.255457403Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.255365925Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-dfw-latitude-02" t=2024-05-29T13:44:15.255374516Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.255285288Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-dfw-latitude-02" t=2024-05-29T13:44:15.255364311Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:15.255324181Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Panama City, country=Panama, datacenter=M247, environment=production, instance=91.90.126.65:9998, ip=91.90.126.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=panama407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.255307645Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:15.255306019Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.25526087Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-dca-latitude-02" t=2024-05-29T13:44:15.255238363Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-us-dca-latitude-01" t=2024-05-29T13:44:15.255170989Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=lon-pp-frontend-database, env=pp" t=2024-05-29T13:44:15.255102816Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-tr-ist-vultr-02" t=2024-05-29T13:44:15.255099891Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-tr-ist-vultr-01" t=2024-05-29T13:44:15.255012352Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-xsp-gsl-02" t=2024-05-29T13:44:15.254951394Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:15.254630413Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=lon-dev-frontend-db, env=dev" t=2024-05-29T13:44:15.254898623Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:15.254582444Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-xsp-gsl-01" t=2024-05-29T13:44:15.254864463Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-sin-gsl-03" t=2024-05-29T13:44:15.254788857Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-sin-gsl-02" t=2024-05-29T13:44:15.254676206Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=master, dbinstance_identifier=fran-prod-frontend-db, env=eu" t=2024-05-29T13:44:15.254666361Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Panama City, country=Panama, datacenter=M247, environment=production, instance=91.90.126.51:9998, ip=91.90.126.51, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/panama.crt, role=vpn, server=panama406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.25469462Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-sin-gsl-02" t=2024-05-29T13:44:15.254666759Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-sin-dp-02" t=2024-05-29T13:44:15.254518025Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.254443455Z caller=manager.go:153 user=658519 slug=harleen1 msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=SQLPerf, dbinstance_identifier=syd-prod-frontend-db, env=au" t=2024-05-29T13:44:15.254441933Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.25445642Z caller=remote_instance_store.go:51 user=314067 slug=itsme msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-sg-sin-dp-01" t=2024-05-29T13:44:15.254443416Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=314067 slug=itsme instance= t=2024-05-29T13:44:15.25438726Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.254259647Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=280735 slug=grafana1eed t=2024-05-29T13:44:15.253968909Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=280735 slug=grafana1eed instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.253897285Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.25423041Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=280735 slug=grafana1eed t=2024-05-29T13:44:15.253821446Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=280735 slug=grafana1eed version=3 fingerprint=9f57c2ecab1bd696 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.253679686Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.253039181s EvaluationString:}]" duration=15.654954ms
+ level=debug ts=2024-05-29T13:44:15.253970381Z caller=manager.go:153 user=673133 slug=musgravegrafana msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-nz-akl-gsl-02" t=2024-05-29T13:44:15.253731085Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-nz-akl-gsl-01" t=2024-05-29T13:44:15.253648268Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=SQLPerf, dbinstance_identifier=lon-qa-frontend-db, env=qa" t=2024-05-29T13:44:15.253628263Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-frontend-database-read-replica-1, env=uk" t=2024-05-29T13:44:15.253555465Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-no-osl-glesys-02" t=2024-05-29T13:44:15.253544575Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.25343023Z caller=remote_instance_store.go:51 user=306470 slug=iconik msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Oslo, country=Norway, datacenter=Glesys, environment=production, instance=46.246.122.98:9998, ip=46.246.122.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/no.crt, role=vpn, server=oslo403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.253400774Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.253365426Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:15.253329774Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.253261309Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Oslo, country=Norway, datacenter=Glesys, environment=production, instance=46.246.122.66:9998, ip=46.246.122.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=oslo402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.253165273Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:15.253126084Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.306665ms
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=SQLPerf, dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:15.251847849Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-dow-jones-db, env=uk" t=2024-05-29T13:44:15.253144178Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.253099167Z caller=manager.go:153 user=318831 slug=grafanavianet msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:15.253054982Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.253063465Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.252895503Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.252918044Z caller=manager.go:153 user=536482 slug=rtdex msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-kr-icn-vultr-02" t=2024-05-29T13:44:15.252886444Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=371085 slug=comexport t=2024-05-29T13:44:15.252866624Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-kr-icn-vultr-01" t=2024-05-29T13:44:15.252818075Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=273545 slug=strigoio t=2024-05-29T13:44:15.186586145Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.87123ms
+ logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.252588998Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.252568592Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-jp-tyo-gsl-02" t=2024-05-29T13:44:15.252600646Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Oslo, country=Norway, datacenter=Glesys, environment=production, instance=46.246.122.34:9998, ip=46.246.122.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/no.crt, role=vpn, server=oslo401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.252617411Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-creditsafe-db, env=uk" t=2024-05-29T13:44:15.25257693Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Oslo, country=Norway, datacenter=Glesys, environment=production, instance=46.246.122.162:9998, ip=46.246.122.162, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=oslo404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.252452228Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-comply-advantage-db, env=uk" t=2024-05-29T13:44:15.252395702Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-jp-tyo-dp-01" t=2024-05-29T13:44:15.252361156Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.252359648Z caller=manager.go:153 user=656459 slug=activeport msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-jp-tyo-dp-01" t=2024-05-29T13:44:15.252350209Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371085 slug=comexport t=2024-05-29T13:44:15.252272059Z level=debug msg="State manager processing evaluation results" resultCount=2
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-it-mxp-dp-03" t=2024-05-29T13:44:15.252286392Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=341675 slug=markabrahamx t=2024-05-29T13:44:15.252265852Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.252198578Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-it-mxp-dp-01" t=2024-05-29T13:44:15.252127634Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.252005222Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-in-del-vultr-01" t=2024-05-29T13:44:15.251958498Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:15.251904283Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-billing-db, env=uk" t=2024-05-29T13:44:15.251876362Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-il-tlv-dp-02" t=2024-05-29T13:44:15.251873362Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-billing-db, env=uk" t=2024-05-29T13:44:15.247475672Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-il-tlv-dp-02" t=2024-05-29T13:44:15.251861447Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.251730204Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-il-tlv-dp-01" t=2024-05-29T13:44:15.251779913Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.251710887Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.145.199:9998, ip=66.115.145.199, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-ontario.crt, role=vpn, server=ontario409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.251729641Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.25162309Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=283914 slug=emmasleep instance="__name__=aws_sqs_approximate_number_of_messages_visible_average, account_id=178863526580, dimension_QueueName=ecom-prod-fluent-event-publisher-queue, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-queue, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics" t=2024-05-29T13:44:15.251485816Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.251453387Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.145.188:9998, ip=66.115.145.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.251425277Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:15.247496752Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-ukserv-04" t=2024-05-29T13:44:15.251373567Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=318831 slug=grafanavianet instance= t=2024-05-29T13:44:15.251309457Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=SQLPerf, dbinstance_identifier=lon-dev-frontend-db, env=dev" t=2024-05-29T13:44:15.251338467Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.251222672Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-ukserv-03" t=2024-05-29T13:44:15.251301137Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-ukserv-03" t=2024-05-29T13:44:15.251291239Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.251205093Z caller=remote_instance_store.go:51 user=158536 slug=clearsaleantifraude msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=158536 slug=clearsaleantifraude t=2024-05-29T13:44:15.251148582Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.251160954Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=111839 slug=last9 instance= t=2024-05-29T13:44:15.251188541Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.145.188:9998, ip=66.115.145.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-ontario.crt, role=vpn, server=ontario412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.251162593Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-ukserv-01" t=2024-05-29T13:44:15.251146034Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=111839 slug=last9 t=2024-05-29T13:44:15.2510965Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}{{ $cluster }}': error parsing template __alert_Churn rate ($instance) - PROD(tsdb.last9.io): template: __alert_Churn rate ($instance) - PROD(tsdb.last9.io):1: undefined variable \"$cluster\""
+ logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.250937436Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=SQLPerf, dbinstance_identifier=fran-prod-frontend-db, env=eu" t=2024-05-29T13:44:15.251056846Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.250888566Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-gsl-05" t=2024-05-29T13:44:15.251010183Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.250992934Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=111839 slug=last9 version=366 fingerprint=c67759765a047ba4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.250737016Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.250423635s EvaluationString:}]" duration=759.867836ms
+ level=debug ts=2024-05-29T13:44:15.250852925Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-gsl-04" t=2024-05-29T13:44:15.2509156Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.145.187:9998, ip=66.115.145.187, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.25091235Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.250800673Z caller=remote_instance_store.go:51 user=316960 slug=mojamteam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:15.250817771Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.859427ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.250810226Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="db=SQLAudit, dbinstance_identifier=lon-pp-frontend-database, env=pp" t=2024-05-29T13:44:15.250751172Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=469068 slug=dvulpe t=2024-05-29T13:44:15.250659768Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=133.546173ms
+ level=debug ts=2024-05-29T13:44:15.250674556Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-gb-lon-gsl-01" t=2024-05-29T13:44:15.250702083Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=101180 slug=rawnet t=2024-05-29T13:44:15.250602402Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.070564ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-fr-mrs-gsl-04" t=2024-05-29T13:44:15.250633308Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-fr-mrs-gsl-03" t=2024-05-29T13:44:15.250569893Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.250569031Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-fr-mrs-gsl-02" t=2024-05-29T13:44:15.250478695Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=66.115.145.186:9998, ip=66.115.145.186, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-ontario.crt, role=vpn, server=ontario410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.250478517Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=198.8.95.221:9998, ip=198.8.95.221, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.250368643Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.250301757Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.250340959Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:15.250271196Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.474361ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-fr-cdg-dp-01" t=2024-05-29T13:44:15.250254511Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.250109311Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=12689e67e24c2bef attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.249975805Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=13.411339ms
+ level=debug ts=2024-05-29T13:44:15.249952354Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=error ts=2024-05-29T13:44:15.249902493Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=198.8.95.194:9998, ip=198.8.95.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.250021315Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.249990471Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.249952855Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-dk-cph-glesys-02" t=2024-05-29T13:44:15.249948412Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=198.8.95.194:9998, ip=198.8.95.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-ontario.crt, role=vpn, server=ontario413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.249867905Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.24974282Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.92.66:9998, ip=172.98.92.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.249681882Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.92.66:9998, ip=172.98.92.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.249665685Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=445941 slug=terawatt instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.249608494Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-de-fra-dp-02" t=2024-05-29T13:44:15.249723641Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=445941 slug=terawatt instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.249583721Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=445941 slug=terawatt version=9 fingerprint=cf7c3e450f87f939 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.24938259Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.248892632s EvaluationString:}]" duration=51.785896ms
+ level=debug ts=2024-05-29T13:44:15.249584329Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-de-fra-dp-01" t=2024-05-29T13:44:15.249650389Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.249373239Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="console | Postgres Failed Connection > 10"
+ level=debug ts=2024-05-29T13:44:15.249324075Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=pulsebet-synthetic
+ level=debug ts=2024-05-29T13:44:15.249304179Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="sport | sports-datatools-ehconsumer-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.249278374Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-console-cms-eau
+ logger=ngalert.state.manager.persist user=21110 slug=foxnextgames t=2024-05-29T13:44:15.249478804Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.711787ms
+ logger=ngalert.state.manager user=391538 slug=risknarrative t=2024-05-29T13:44:15.249468306Z level=debug msg="State manager processing evaluation results" resultCount=59
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-cl-scl-latitude-01" t=2024-05-29T13:44:15.249481525Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.249212289Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-docdb-ng-user-eng-eau
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-ch-zrh-dp-02" t=2024-05-29T13:44:15.249424776Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.249187443Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(account-domain-nser-user)"
+ level=debug ts=2024-05-29T13:44:15.249172578Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-migration-datatools-eau
+ level=debug ts=2024-05-29T13:44:15.249165871Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="sport | bghandler-eng-engine-Sites-Probing"
+ logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:15.249333764Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.685697ms
+ level=debug ts=2024-05-29T13:44:15.249155659Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="sport | bghandler-eng-persistenceengine-Sites-Probing"
+ logger=ngalert.state.manager user=27014 slug=baseline instance= t=2024-05-29T13:44:15.249326486Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-ch-zrh-dp-01" t=2024-05-29T13:44:15.249326623Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.249118229Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="frontend | betnation-website - Synthetic"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-ca-yyz-dp-03" t=2024-05-29T13:44:15.249259606Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.249088566Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(account-domain-wallet-fallback)"
+ level=debug ts=2024-05-29T13:44:15.24905503Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(promotion-domain-sf-fallback)"
+ level=debug ts=2024-05-29T13:44:15.249019779Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="frontend | betdeluxe-website - Synthetic"
+ level=debug ts=2024-05-29T13:44:15.248966169Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="bet | betplacement-eng-api-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248950338Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | acc-dt-pollerselfexclusion-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248915846Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-betnation-website-eau
+ level=debug ts=2024-05-29T13:44:15.248897496Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(bet-marketdatastore-fallback)"
+ level=debug ts=2024-05-29T13:44:15.248861942Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="bet | mds-eng-fallback-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248819938Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=Racing-cosmos
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-br-sao-vultr-02" t=2024-05-29T13:44:15.249029414Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.248811212Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="account | acc-dt-wrclientlimit-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248789038Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="bet | betsettlement-eng-writer-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248876851Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-br-sao-vultr-01" t=2024-05-29T13:44:15.248971077Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.248768686Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="promotion | Postgres Failed Connection > 10"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.80.66:9998, ip=172.98.80.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.248884234Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.2487603Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="account | acc-dt-wrpayment-Sites-Probing"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.248914653Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.434071ms
+ level=debug ts=2024-05-29T13:44:15.24875201Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | account-intercom-engine-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248741888Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="bet | bethistory-eng-api-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.24888963Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.248724542Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-betdatatools-eng-eau
+ level=debug ts=2024-05-29T13:44:15.248715871Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | acc-dt-wrtransaction-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248707457Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | acc-dt-polleraccount-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248698624Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="console | console-frontend-Sites-Probing"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.248856478Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.248809821Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.24867726Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="PSQL | prd-psql-bet-shared"
+ logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:15.248752922Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.935725ms
+ level=debug ts=2024-05-29T13:44:15.248784815Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.248554689Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="bet | tbs-writer-amlwriter-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248532656Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="ASP | prd-asp-ng-bet-srm-engine-eau"
+ level=debug ts=2024-05-29T13:44:15.24852397Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="frontend | prebigbet-website-bigbet-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248651151Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.248516404Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=r2d2
+ level=debug ts=2024-05-29T13:44:15.248493774Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="frontend | prebetdeluxe-website-betdeluxe-Sites-Probing"
+ logger=ngalert.state.manager.persist user=285250 slug=quartx t=2024-05-29T13:44:15.248648243Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.007311ms
+ level=debug ts=2024-05-29T13:44:15.248429286Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(promotion-priceboost-client-fallback)"
+ level=debug ts=2024-05-29T13:44:15.248584803Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.248389739Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="promotion | promotion-salesforce-engine-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248378258Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="promotion | promotion-eng-bonusagedoutwriter-Sites-Probing"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-au-syd-gsl-02" t=2024-05-29T13:44:15.248606417Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.24835107Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-sports-signalr-eng-eau
+ level=debug ts=2024-05-29T13:44:15.248291951Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="ASP | prd-asp-ng-bet-settlement-engine-eau"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-au-syd-gsl-01" t=2024-05-29T13:44:15.248541552Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.80.2:9998, ip=172.98.80.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.248516281Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.248494346Z caller=remote_instance_store.go:51 user=618053 slug=teamfarahcpe2223 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.80.2:9998, ip=172.98.80.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.248500201Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.24824456Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(bet-settlement-sgm-fallback)"
+ level=debug ts=2024-05-29T13:44:15.248226877Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="sport | sports-eng-api-Sites-Probing"
+ logger=ngalert.state.manager.persist user=618053 slug=teamfarahcpe2223 t=2024-05-29T13:44:15.24845776Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.248218029Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(account-domain-password-reset-email)"
+ logger=ngalert.state.manager user=618053 slug=teamfarahcpe2223 instance="datasource_uid=LOVkmwLVz, ref_id=A" t=2024-05-29T13:44:15.248440559Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:15.248187657Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(account-domain-payment-fallback)"
+ level=debug ts=2024-05-29T13:44:15.248179514Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-betintercept-eng-eau
+ level=debug ts=2024-05-29T13:44:15.248169019Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | acc-dt-wruser-Sites-Probing"
+ logger=ngalert.scheduler user=618053 slug=teamfarahcpe2223 version=2 fingerprint=8a0a48e3be27c742 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.24831497Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=LOVkmwLVz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.247986704s EvaluationString:}]" duration=82.204479ms
+ level=debug ts=2024-05-29T13:44:15.248163082Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-sf-activities-eau
+ level=debug ts=2024-05-29T13:44:15.248155784Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="racing | tabingress-eng-engine-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248145158Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(promotion-bonus-wallet-expiry)"
+ level=debug ts=2024-05-29T13:44:15.248136494Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-acc-cb-dt-eau
+ level=debug ts=2024-05-29T13:44:15.248108589Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="frontend | prepulsebet-website-pulsebet-Sites-Probing"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-at-vie-dp-03" t=2024-05-29T13:44:15.248320038Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.80.2:9998, ip=172.98.80.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-ontario.crt, role=vpn, server=ontario407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.248325847Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.248278885Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.248054753Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="bet | bethistory-salesforce-engine-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248043369Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="CosmosDB | prd-docdb-ng-wallet-eng-eau"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=proxy-at-vie-dp-01" t=2024-05-29T13:44:15.248174251Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.24800549Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="frontend | prebetnation-website - Synthetic"
+ level=debug ts=2024-05-29T13:44:15.248113434Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.24794593Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-acc-dt-eau
+ level=debug ts=2024-05-29T13:44:15.247938337Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | user-eng-api-Sites-Probing"
+ logger=ngalert.state.manager user=443907 slug=awarehq instance= t=2024-05-29T13:44:15.248051862Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.80.130:9998, ip=172.98.80.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.248117348Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.80.130:9998, ip=172.98.80.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.248101071Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.2478779Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="sport | sports-datatools-pollerfixture-Sites-Probing"
+ level=debug ts=2024-05-29T13:44:15.248002667Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.247869617Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="APIM | prd-apim-ng-infra-shared | Manual"
+ logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:15.247830502Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=25.571192ms
+ level=debug ts=2024-05-29T13:44:15.247843632Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(bet-settlement-fallback)"
+ level=debug ts=2024-05-29T13:44:15.247799295Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="ASP | prd-asp-ng-promotion-datatools-eau | High Memory Usage > 75%"
+ level=debug ts=2024-05-29T13:44:15.247788154Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="ASP | prd-asp-ng-promotion-xl-eau | High Memory Usage > 75%"
+ level=debug ts=2024-05-29T13:44:15.247768001Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="console | nser-xl-api-Sites-Probing"
+ logger=ngalert.scheduler user=471861 slug=planetstaging version=2 fingerprint=c7311676f6dbd549 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.247875176Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.247577845s EvaluationString:}]" duration=16.762672ms
+ level=debug ts=2024-05-29T13:44:15.247731832Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="bet | srm-eng-writer-Sites-Probing"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-vultr-mex-ar-a01" t=2024-05-29T13:44:15.24793008Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-vultr-icn-kr-a02" t=2024-05-29T13:44:15.247892154Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=491156 slug=tst01wr t=2024-05-29T13:44:15.247790009Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.069835ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.80.130:9998, ip=172.98.80.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-ontario.crt, role=vpn, server=ontario406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.247930337Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.247884427Z caller=remote_instance_store.go:51 user=736975 slug=jetcomms msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.247890824Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.247831044Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.24776051Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.247622333Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="console | Postgres CPU Percentage > 75%"
+ level=debug ts=2024-05-29T13:44:15.247753477Z caller=remote_instance_store.go:51 user=234870 slug=nextmovebooking msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.24759399Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-account-xl-eau
+ level=debug ts=2024-05-29T13:44:15.247585086Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-wallet-eng-eau
+ level=debug ts=2024-05-29T13:44:15.247565086Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-bgingress-eng-eau
+ logger=ngalert.state.manager.persist user=736975 slug=jetcomms t=2024-05-29T13:44:15.247773735Z level=debug msg="Deleting alert states" count=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.83.47.7:9998, ip=172.83.47.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.247769689Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.247550021Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-sports-eng-eau
+ level=debug ts=2024-05-29T13:44:15.247539347Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="ASP | prd-asp-ng-sport-feeds-eau"
+ level=debug ts=2024-05-29T13:44:15.247700462Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.247685941Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=234870 slug=nextmovebooking instance= t=2024-05-29T13:44:15.24768758Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.247725826Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=234870 slug=nextmovebooking instance= t=2024-05-29T13:44:15.24767508Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.247518273Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-console-eau + level=debug ts=2024-05-29T13:44:15.247448602Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-depositurnover-writer-eau + logger=ngalert.state.manager user=736975 slug=jetcomms instance="instance=10.105.127.8:9100" t=2024-05-29T13:44:15.247675922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=234870 slug=nextmovebooking version=2 fingerprint=68355ba0618189ec attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.2475447Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.247137273s EvaluationString:}]" duration=28.989966ms + level=debug ts=2024-05-29T13:44:15.247421717Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="account | account-xl-api-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.247413567Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="PSQL | prd-psql-migration-shared" + logger=ngalert.state.manager user=736975 slug=jetcomms instance="instance=10.105.127.7:9100" t=2024-05-29T13:44:15.247608671Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.24735885Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="sport | sgm-eng-winningsims-rugbyleague-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.247283208Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-autho-logsevtgrid-eau + level=debug ts=2024-05-29T13:44:15.247274619Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="frontend | presurge-website-surge-Sites-Probing" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-linode-fra-za-a02" t=2024-05-29T13:44:15.247519975Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.247210486Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-betplacement-eng-eau + level=debug ts=2024-05-29T13:44:15.247190221Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="bet | betdatatools-eng-poller-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.247123519Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="migration | migration-datatools-wrauthuser-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.247114625Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="bet | bethistory-eng-writer-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.247091901Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-scaggregator-eng-eau + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-linode-ewr-us-a04" t=2024-05-29T13:44:15.247343121Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, 
city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.83.47.71:9998, ip=172.83.47.71, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ontario401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.247387002Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.247068275Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="frontend | prepulsebet-website - Synthetic" + level=debug ts=2024-05-29T13:44:15.247038762Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-prebetnation-website-eau + level=debug ts=2024-05-29T13:44:15.247018455Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-nser-xl-eau + logger=ngalert.scheduler user=736975 slug=jetcomms version=23 fingerprint=1a8fe7f3b6421267 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.247194691Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=10.105.0.1:9100 State:Normal Error: Results:map[] Values:map[cpu usage:{Var:cpu usage Labels:instance=10.105.0.1:9100 Value:0xc019dcde68} last:{Var:last Labels:instance=10.105.0.1:9100 Value:0xc019dcdea0} trigger:{Var:trigger Labels:instance=10.105.0.1:9100 Value:0xc019dcded0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.246678057s EvaluationString:[ var='cpu usage' labels={instance=10.105.0.1:9100} value=0.1589293182689747 ], [ var='last' labels={instance=10.105.0.1:9100} value=0.1589293182689747 ], [ var='trigger' labels={instance=10.105.0.1:9100} value=0 ]} {Instance:instance=10.105.127.7:9100 State:Normal Error: Results:map[] Values:map[cpu usage:{Var:cpu usage Labels:instance=10.105.127.7:9100 Value:0xc019dcdf18} last:{Var:last Labels:instance=10.105.127.7:9100 Value:0xc019dcdf40} trigger:{Var:trigger Labels:instance=10.105.127.7:9100 Value:0xc019dcdf60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.246688358s EvaluationString:[ var='cpu usage' labels={instance=10.105.127.7:9100} value=2.117291389189977 ], [ var='last' labels={instance=10.105.127.7:9100} value=2.117291389189977 ], [ var='trigger' labels={instance=10.105.127.7:9100} value=0 ]} {Instance:instance=10.105.127.8:9100 State:Normal Error: Results:map[] Values:map[cpu usage:{Var:cpu usage Labels:instance=10.105.127.8:9100 Value:0xc019dcdfc0} last:{Var:last Labels:instance=10.105.127.8:9100 Value:0xc019dcdfd8} trigger:{Var:trigger Labels:instance=10.105.127.8:9100 Value:0xc019dcdf90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.246692768s EvaluationString:[ var='cpu usage' labels={instance=10.105.127.8:9100} value=1.5014698599700818 ], [ var='last' labels={instance=10.105.127.8:9100} value=1.5014698599700818 ], [ var='trigger' labels={instance=10.105.127.8:9100} value=0 ]} {Instance:instance=10.105.255.2:9100 State:Normal Error: Results:map[] Values:map[cpu usage:{Var:cpu usage Labels:instance=10.105.255.2:9100 Value:0xc02a270028} last:{Var:last Labels:instance=10.105.255.2:9100 Value:0xc02a270040} trigger:{Var:trigger Labels:instance=10.105.255.2:9100 Value:0xc02a270060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.246696968s EvaluationString:[ var='cpu usage' labels={instance=10.105.255.2:9100} value=31.085733478670292 ], [ var='last' labels={instance=10.105.255.2:9100} value=31.085733478670292 ], [ var='trigger' labels={instance=10.105.255.2:9100} 
value=0 ]} {Instance:instance=10.105.255.3:9100 State:Normal Error: Results:map[] Values:map[cpu usage:{Var:cpu usage Labels:instance=10.105.255.3:9100 Value:0xc02a2700a8} last:{Var:last Labels:instance=10.105.255.3:9100 Value:0xc02a2700d0} trigger:{Var:trigger Labels:instance=10.105.255.3:9100 Value:0xc02a270180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.246700239s EvaluationString:[ var='cpu usage' labels={instance=10.105.255.3:9100} value=0.4690117252900322 ], [ var='last' labels={instance=10.105.255.3:9100} value=0.4690117252900322 ], [ var='trigger' labels={instance=10.105.255.3:9100} value=0 ]}]" duration=26.301374ms + level=debug ts=2024-05-29T13:44:15.246987572Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-tbs-writer-eau + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-backend-db-read-replica-1, env=uk" t=2024-05-29T13:44:15.247246421Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.246949942Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="racing | doingress-eng-engine-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.246942575Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="Rabbit MQ" + level=debug ts=2024-05-29T13:44:15.246938224Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - CPU - Domain Investments Event Consumer" + level=debug ts=2024-05-29T13:44:15.246931253Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group="Sweet-FA - Memory - Domain Access Event Consumer" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.83.47.71:9998, ip=172.83.47.71, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-ontario.crt, role=vpn, server=ontario401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.247235845Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.246906651Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - Memory - Nexus Seeder API" + level=debug ts=2024-05-29T13:44:15.247194138Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.246901259Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="CosmosDB | prd-docdb-ng-user-eng-eau" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-linode-ewr-us-a02" t=2024-05-29T13:44:15.24722076Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.246882786Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - CPU - Nexus Event Publisher" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=p2p-linode-ewr-us-a02" t=2024-05-29T13:44:15.247211509Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.246872225Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - ASPNetCore Health Checks - Registry Documents API" + level=debug ts=2024-05-29T13:44:15.246860277Z caller=ruler.go:606 msg="rule group owned" 
user=434892 slug=apexfsnzdev group="Sweet-FA - Memory - Domain Users API" + level=debug ts=2024-05-29T13:44:15.246847903Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - No Pods - Domain Access Event Consumer" + level=debug ts=2024-05-29T13:44:15.246837925Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group="Sweet-FA - No Pods - Domain Investments Event Consumer" + level=debug ts=2024-05-29T13:44:15.246731288Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-tbscrud-eng-eau + level=debug ts=2024-05-29T13:44:15.246723783Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="frontend | prebetnation-website-betnation-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.246716238Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-prepulsebet-website-eau + level=debug ts=2024-05-29T13:44:15.246991646Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv6-linode-ewr-us-z01" t=2024-05-29T13:44:15.247017503Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.24657242Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-promotion-signalr-eng-eau + level=debug ts=2024-05-29T13:44:15.246649243Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - Memory - Nexus Event Publisher" + level=debug ts=2024-05-29T13:44:15.246608632Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - CPU - Domain Users API" + level=debug ts=2024-05-29T13:44:15.245232427Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.246584399Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group=Platform + level=debug ts=2024-05-29T13:44:15.246561539Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(account-userengine-kyccallback)" + level=debug ts=2024-05-29T13:44:15.246546832Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group="Sweet-FA - Memory - Domain Investors API" + level=debug ts=2024-05-29T13:44:15.246856263Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv6-linode-bom-in-a02" t=2024-05-29T13:44:15.246883312Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.246527147Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group="Sweet-FA - API Errors - Registry Documents API" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv6-linode-bom-in-a02" t=2024-05-29T13:44:15.246872322Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.246161103Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-activtstmnt-snd-eau + level=debug ts=2024-05-29T13:44:15.246505094Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group="Sweet-FA - ASPNetCore Health Checks - Domain Users API" + level=debug ts=2024-05-29T13:44:15.245512946Z caller=ruler.go:606 
msg="rule group owned" user=305351 slug=dylspeaking group=123 + level=debug ts=2024-05-29T13:44:15.246404542Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - Memory - Domain Investments Event Consumer" + level=debug ts=2024-05-29T13:44:15.246394954Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group="Sweet-FA - CPU - Domain Investors API" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=DataPacket, environment=production, instance=178.249.214.97:9998, ip=178.249.214.97, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=ontario416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.246692988Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.24633397Z caller=ruler.go:606 msg="rule group owned" user=434892 slug=apexfsnzdev group="Sweet-FA - Heartbeat Check - Nexus CDC Processor" + level=debug ts=2024-05-29T13:44:15.246293352Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - No Pods - Domain Investors Event Consumer" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.246650821Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.246278294Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=724572 slug=illawarraretirementtrust group="2d Evaluation group" + level=debug ts=2024-05-29T13:44:15.246253845Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=724572 slug=illawarraretirementtrust group="30m Evaluation Group" + level=debug ts=2024-05-29T13:44:15.246187137Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=724572 slug=illawarraretirementtrust group="1h Evaluation Group" + logger=ngalert.state.manager user=395357 slug=sensen instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.246541836Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.245681312Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=724572 slug=illawarraretirementtrust group="30d Evaluation group" + level=debug ts=2024-05-29T13:44:15.246144882Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | transhistory-eng-api-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.246135553Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | acc-cb-dt-wrcrossbrand-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.246104551Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=web-synthetic-evaluation + level=debug ts=2024-05-29T13:44:15.246074879Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - Memory - Domain Investors API" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Ontario, country=Canada, datacenter=DataPacket, environment=production, instance=178.249.214.97:9998, ip=178.249.214.97, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, 
path=/root/certificates/ca-ontario-so.crt, role=streaming-optimized, server=ontario416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.246513998Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-vultr-cdg-fr-a08" t=2024-05-29T13:44:15.246455796Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.245994972Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=prd-ai-ng-account-salesforce-eau + level=debug ts=2024-05-29T13:44:15.246395218Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-vultr-cdg-fr-a07" t=2024-05-29T13:44:15.246343903Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.246203204Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.245865297Z caller=ruler.go:606 msg="rule group owned" user=292121 slug=constellationtherapy group="Psyker critical" + level=debug ts=2024-05-29T13:44:15.24585733Z caller=ruler.go:606 msg="rule group owned" user=292121 slug=constellationtherapy group="Psyker crash" + level=debug ts=2024-05-29T13:44:15.24583926Z caller=ruler.go:606 msg="rule group owned" user=292121 slug=constellationtherapy group="No Psyker jobs" + level=debug ts=2024-05-29T13:44:15.244263808Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=360441 slug=barygame group=router + level=debug ts=2024-05-29T13:44:15.245787786Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=292121 slug=constellationtherapy group="Web - Internal - Any Error" + level=debug ts=2024-05-29T13:44:15.24577954Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=292121 slug=constellationtherapy group="Any failed jobs" + level=debug ts=2024-05-29T13:44:15.245535775Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=314055 slug=axiefriends group=fast-trader + level=debug ts=2024-05-29T13:44:15.24563492Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=724572 slug=illawarraretirementtrust group="30m Evaluation group" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.246081735Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.245528687Z caller=ruler.go:606 msg="rule group owned" user=475814 slug=fl0core group=operator + level=debug ts=2024-05-29T13:44:15.245502343Z caller=ruler.go:606 msg="rule group owned" user=475146 slug=iuriimordovin group="Data sync delay" + level=debug ts=2024-05-29T13:44:15.245467266Z caller=ruler.go:606 msg="rule group owned" user=671365 slug=volleydev group="Prod Alerts" + level=debug ts=2024-05-29T13:44:15.245467116Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=348969 slug=huseinzol05 group=GPU-Usage + level=debug ts=2024-05-29T13:44:15.245459814Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=348969 slug=huseinzol05 group=Memory-Usage + level=debug ts=2024-05-29T13:44:15.245457977Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=662212 slug=zenglobal group="Zen Alerts" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-vultr-cdg-fr-a02" t=2024-05-29T13:44:15.245980683Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.245437602Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=348969 slug=huseinzol05 group="Tweets delta 10 minutes" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-vultr-cdg-fr-a01" t=2024-05-29T13:44:15.245901726Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-vultr-cdg-fr-a01" t=2024-05-29T13:44:15.245889255Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.245364858Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=421340 slug=yourlow group=Error + level=debug ts=2024-05-29T13:44:15.245346Z caller=ruler.go:606 msg="rule group owned" user=414734 slug=cubiko group=Clickhouse + level=debug ts=2024-05-29T13:44:15.245314553Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=455451 slug=opms group=emailNotification + level=debug ts=2024-05-29T13:44:15.245311317Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=414734 slug=cubiko group=manage + level=debug ts=2024-05-29T13:44:15.245220647Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=455451 slug=opms group="4xx every 1 min" + level=debug ts=2024-05-29T13:44:15.245263912Z caller=ruler.go:606 msg="rule group owned" user=344576 slug=blairm group="Disk Space Free %" + level=debug ts=2024-05-29T13:44:15.245250357Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=315952 slug=rogo group="CPU Utilization" + level=debug ts=2024-05-29T13:44:15.245240845Z caller=ruler.go:606 msg="rule group owned" user=346678 slug=kwils group=RAM + level=debug ts=2024-05-29T13:44:15.24523144Z caller=ruler.go:606 msg="rule group owned" user=346678 slug=kwils group=FS + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-thg-nyc-us-a07" t=2024-05-29T13:44:15.245708487Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.245212461Z caller=ruler.go:606 msg="rule group owned" user=455451 slug=opms group="5xx every 1 min" + level=debug ts=2024-05-29T13:44:15.245200652Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=656459 slug=activeport group="Orange Jordan alerts" + level=debug ts=2024-05-29T13:44:15.245192019Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=656459 slug=activeport group="Hanoi alerts" + 
logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-thg-nyc-us-a06" t=2024-05-29T13:44:15.245635274Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.245174082Z caller=ruler.go:606 msg="rule group owned" user=656459 slug=activeport group="GCG alerts" + level=debug ts=2024-05-29T13:44:15.24510751Z caller=ruler.go:606 msg="rule group owned" user=656459 slug=activeport group="STC alerts" + level=debug ts=2024-05-29T13:44:15.245098133Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=656459 slug=activeport group="Orange Spain alerts" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.245466618Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.245089987Z caller=ruler.go:606 msg="rule group owned" user=656459 slug=activeport group="HCM2 rack 7 alerts" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-thg-nyc-us-a03" t=2024-05-29T13:44:15.24537829Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.245025418Z caller=ruler.go:606 msg="rule group owned" user=412641 slug=declon group=system + level=debug ts=2024-05-29T13:44:15.244987145Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=616049 slug=mitamon group=ssl + level=debug ts=2024-05-29T13:44:15.2449216Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=623546 slug=jcwardle group=RecHoundProdAlerts + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.245258412Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.244910552Z caller=ruler.go:606 msg="rule group owned" user=388985 slug=gabrielmalaluan group=SampleGroup + level=debug ts=2024-05-29T13:44:15.244853897Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="bet | betdatatools-eng-adminapi-Sites-Probing" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-thg-nyc-us-a02" t=2024-05-29T13:44:15.245299697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.245219453Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.24517386Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.244864885Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=397315 slug=royportas group="System Health" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.245215194Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.24486104Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=656459 slug=activeport group="Turnium Canada alerts" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.245211133Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.245203064Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:15.245162383Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.24460049Z caller=ruler.go:606 msg="rule group owned" user=443201 slug=hhcprod group=slackalert + level=debug ts=2024-05-29T13:44:15.244843119Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=656459 slug=activeport group="Hathway alerts" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-packex-hel-fi-a01" t=2024-05-29T13:44:15.245120328Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.244803927Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="CosmosDB | prd-docdb-ng-payment-eng-eau" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Omaha, country=United States, datacenter=DataPacket, environment=production, instance=84.239.25.129:9998, ip=84.239.25.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-nebraska-pf.crt, role=vpn, server=nebraska402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.245118183Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=505309 slug=jromero248 t=2024-05-29T13:44:15.245023803Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=46.23145ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.245073201Z level=warn msg="Evaluation result contains either reserved labels 
or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.244767912Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-bethistory-eng-eau + level=debug ts=2024-05-29T13:44:15.244755664Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group=sports-bet + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.244949867Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=57.650203ms + level=debug ts=2024-05-29T13:44:15.244892577Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.24493519Z caller=remote_instance_store.go:51 user=637258 slug=testb9lab msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.244609691Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=284646 slug=habitap group="Angullia PC Monitoring" + level=debug ts=2024-05-29T13:44:15.244697463Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-bghandler-eng-eau + level=debug ts=2024-05-29T13:44:15.244689389Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="account | acc-dt-pollerclientfactor-Sites-Probing" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-m247-bru-be-a01" t=2024-05-29T13:44:15.244929307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=435206 slug=kkrprivateuat t=2024-05-29T13:44:15.244930395Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-linode-sjc-us-a02" t=2024-05-29T13:44:15.244844054Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=435206 slug=kkrprivateuat t=2024-05-29T13:44:15.244867415Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.244661891Z caller=ruler.go:606 msg="rule group owned" user=460952 slug=prdnextgen group="sport | sgm-eng-winningsims-ausrules-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.24462689Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-cms-eau + level=debug ts=2024-05-29T13:44:15.244555885Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=284646 slug=habitap group="Site PC Eval Group" + level=info ts=2024-05-29T13:44:15.244762545Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.118.165:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=w2LesgE4k alerts=1 + level=debug ts=2024-05-29T13:44:15.244652094Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.244590697Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=561735 slug=tvhs group="Every 5min" + level=debug ts=2024-05-29T13:44:15.244579444Z caller=ruler.go:606 msg="rule group owned" user=441901 slug=openmarkets group="Storage Account Eval" + level=debug ts=2024-05-29T13:44:15.244544618Z caller=ruler.go:606 msg="rule group owned" user=441901 slug=openmarkets group="CPU Threshold" + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="host=ipv4-linode-bom-in-a01" t=2024-05-29T13:44:15.244582687Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.244478588Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=291766 slug=fita group="Disk Usage - Airflow Postgresql > 85%" + level=debug ts=2024-05-29T13:44:15.244450641Z caller=ruler.go:606 msg="rule group owned" user=291766 slug=fita group="Prod DB Memory Usage >70%" + level=debug ts=2024-05-29T13:44:15.244450035Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-bt-fra-de-a02" t=2024-05-29T13:44:15.24449353Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-bt-fra-de-a02" t=2024-05-29T13:44:15.244480115Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.244382701Z caller=ruler.go:606 msg="rule group owned" user=466402 slug=apexfsnzprod group="Sweet-FA - CPU - Domain Investments Event Consumer" + level=debug ts=2024-05-29T13:44:15.244363119Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - API Errors - Domain Users API" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.244483265Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.244343194Z caller=ruler.go:606 msg="rule group owned" user=466402 slug=apexfsnzprod group="Sweet-FA - No Pods - Domain Investments API" + logger=ngalert.state.manager user=555280 slug=hipcreative instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.244380595Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:15.244305714Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.244311419Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - Memory - Domain Investments API" + level=debug ts=2024-05-29T13:44:15.244154896Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="SB | prd-sb-ng-infra-shared-eau-(account-account-status-fallback)" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nuuk, country=Greenland, datacenter=M247, environment=production, instance=91.90.120.190:9998, ip=91.90.120.190, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=greenland407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.244337342Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.244219643Z caller=ruler.go:606 msg="rule group owned" user=475814 slug=fl0core group=builds + level=debug ts=2024-05-29T13:44:15.244210023Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=475814 slug=fl0core group=rds-connections + logger=ngalert.state.manager user=767797 slug=mgmresorts instance= t=2024-05-29T13:44:15.244267148Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.244181908Z caller=ruler.go:609 msg="rule 
group not owned, ignoring" user=318831 slug=grafanavianet group=Puc + level=debug ts=2024-05-29T13:44:15.244152436Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=318831 slug=grafanavianet group=Ang + level=debug ts=2024-05-29T13:44:15.243230322Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group=prd-ai-ng-sgm-eng-pricing-eau + level=debug ts=2024-05-29T13:44:15.244141004Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=318831 slug=grafanavianet group=Simp + logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:15.244176975Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.244169743Z caller=remote_instance_store.go:51 user=278024 slug=fibreking msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.243995664Z caller=ruler.go:606 msg="rule group owned" user=384912 slug=emailtkdk group="iStock alert rules" + level=debug ts=2024-05-29T13:44:15.243985236Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=414754 slug=scancam group=edge + logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:15.244160103Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + level=debug ts=2024-05-29T13:44:15.244072691Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=278024 slug=fibreking t=2024-05-29T13:44:15.244119829Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.24390142Z caller=ruler.go:606 msg="rule group owned" user=745187 slug=maninweb3 group=NodeHealth + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ipv4-aruba-flr-it-a03" t=2024-05-29T13:44:15.244063498Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=278024 slug=fibreking version=14 fingerprint=890172768ac56838 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.243950415Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=IobyqSr7k, ref_id=Bearing Lubricant State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.243657959s EvaluationString:}]" duration=57.843552ms + level=debug ts=2024-05-29T13:44:15.243866104Z caller=ruler.go:606 msg="rule group owned" user=673133 slug=musgravegrafana group=test + level=debug ts=2024-05-29T13:44:15.243865224Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=318831 slug=grafanavianet group=EastSale + logger=ngalert.state.manager user=555280 slug=hipcreative t=2024-05-29T13:44:15.243970153Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:15.243799971Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.243820246Z caller=ruler.go:606 msg="rule group owned" user=318831 slug=grafanavianet group=DSTCafeVIC + level=debug ts=2024-05-29T13:44:15.243870119Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.243757487Z caller=ruler.go:606 msg="rule group owned" user=466402 slug=apexfsnzprod group="Sweet-FA - ASPNetCore Health Checks - Domain Investments API" + level=debug ts=2024-05-29T13:44:15.243735423Z caller=ruler.go:606 msg="rule group owned" user=309737 
slug=info4460 group=K8s_Pod_Alerts + level=debug ts=2024-05-29T13:44:15.24368664Z caller=ruler.go:606 msg="rule group owned" user=915971 slug=rollbar group=Pipeline + level=debug ts=2024-05-29T13:44:15.24367575Z caller=ruler.go:606 msg="rule group owned" user=915971 slug=rollbar group=Debezium + level=debug ts=2024-05-29T13:44:15.24366083Z caller=ruler.go:606 msg="rule group owned" user=915971 slug=rollbar group=mysqld + level=debug ts=2024-05-29T13:44:15.24362285Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - Heartbeat Check - Nexus CDC Processor" + level=debug ts=2024-05-29T13:44:15.243602039Z caller=ruler.go:606 msg="rule group owned" user=466402 slug=apexfsnzprod group="Sweet-FA - CPU - Nexus CDC Processor" + level=debug ts=2024-05-29T13:44:15.243464994Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - No Pods - Nexus Event Publisher" + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.243504802Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.741623ms + level=debug ts=2024-05-29T13:44:15.243446001Z caller=ruler.go:606 msg="rule group owned" user=466402 slug=apexfsnzprod group="Sweet-FA - No Pods - Domain Access Event Consumer" + level=debug ts=2024-05-29T13:44:15.243429719Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - Specific Alerts - Nexus Seeder - Seeding Worker Error Count" + level=debug ts=2024-05-29T13:44:15.243419687Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - API Errors - Domain Investors API" + logger=ngalert.state.manager.persist user=714711 slug=nomiai t=2024-05-29T13:44:15.2433798Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=714711 slug=nomiai instance="service=selfie-generator" t=2024-05-29T13:44:15.243303829Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.243021922Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=466402 slug=apexfsnzprod group="Sweet-FA - API Errors - Registry Documents API" + level=debug ts=2024-05-29T13:44:15.243162693Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.243010814Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=460952 slug=prdnextgen group="account | acc-dt-pollerclient-Sites-Probing" + level=debug ts=2024-05-29T13:44:15.243200446Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=379503 slug=smartseller group="1 minutes" + level=debug ts=2024-05-29T13:44:15.243184551Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=542567 slug=serviannz group=CPU's + level=debug ts=2024-05-29T13:44:15.243178713Z caller=ruler.go:606 msg="rule group owned" user=542567 slug=serviannz group="File Systems" + level=debug ts=2024-05-29T13:44:15.243107876Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=434892 slug=apexfsnzdev group="Sweet-FA - No Pods - Nexus CDC Processor" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.24316327Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.243029949Z caller=ruler.go:606 msg="rule group owned" user=396973 slug=rogueasianoc group=Default + level=debug ts=2024-05-29T13:44:15.243044163Z caller=ruler.go:606 msg="rule group owned" user=705776 slug=datacomcloudservices group=Oops + level=debug ts=2024-05-29T13:44:15.242998948Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=473905 slug=spree group=Checking + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nuuk, country=Greenland, datacenter=M247, environment=production, instance=91.90.120.145:9998, ip=91.90.120.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/greenland.crt, role=vpn, server=greenland404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.242986772Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.242939319Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=error ts=2024-05-29T13:44:15.24288718Z caller=ruler.go:515 msg="failed to load config from grafana instance, skipping instance" user=480895 slug=sarahspangenbergaustra err="user has the remote ruler not enabled" + level=debug ts=2024-05-29T13:44:15.235970326Z caller=remote_instance_store.go:51 user=390300 slug=astrachain msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.24271592Z caller=remote_instance_store.go:51 user=477135 slug=grafanaboomerang msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=477135 slug=grafanaboomerang instance= t=2024-05-29T13:44:15.24266231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=477135 slug=grafanaboomerang t=2024-05-29T13:44:15.242626302Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.242597487Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.24254074Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.242502088Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nuuk, country=Greenland, datacenter=M247, environment=production, instance=91.90.120.130:9998, ip=91.90.120.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/greenland.crt, role=vpn, server=greenland403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.242539516Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.242494232Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.242464209Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.242350057Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.242301516Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.24227653Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.242300151Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=212546 slug=modica t=2024-05-29T13:44:15.24221785Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.335424ms
+level=debug component=discovery ts=2024-05-29T13:44:15.242176384Z caller=retry.go:58 user=480895 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=4
+level=debug ts=2024-05-29T13:44:15.242053771Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.242154418Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.24213699Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.242051996Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.242066112Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.241996242Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nicosia, country=Cyprus, datacenter=M247, environment=production, instance=185.253.162.15:9998, ip=185.253.162.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=cyprus404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.241852011Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=345894 slug=gw2drf t=2024-05-29T13:44:15.241377318Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=345894 slug=gw2drf instance= t=2024-05-29T13:44:15.241343418Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=285730 slug=niche instance="app=Niche.com Website" t=2024-05-29T13:44:15.241217626Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.240791531Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=191.96.227.4:9998, ip=191.96.227.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork443, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.240728099Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.24064631Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.240535396Z caller=remote_instance_store.go:51 user=415003 slug=salaryfinance msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.240540935Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=415003 slug=salaryfinance instance= t=2024-05-29T13:44:15.240477665Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.240494876Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=698963 slug=lemonade instance="app=events-collector-sqs-worker, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=events-collector-sqs-worker, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production" t=2024-05-29T13:44:15.240456206Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.240337132Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="region=us-east-1, service=kube-state-metrics, stage=production"
+logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.24030215Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager.persist user=423960 slug=mukeshreddy996 t=2024-05-29T13:44:15.240145346Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=423960 slug=mukeshreddy996 instance= t=2024-05-29T13:44:15.240105711Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=191.96.227.3:9998, ip=191.96.227.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork442, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.240162372Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=532409 slug=jnjdev instance= t=2024-05-29T13:44:15.240002968Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.239868554Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.186816598Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:15.239652657Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=191.96.227.2:9998, ip=191.96.227.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork441, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.239699612Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=191.96.227.2:9998, ip=191.96.227.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newyork441, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.239448109Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=191.96.227.2:9998, ip=191.96.227.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newyork441, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.239425465Z level=debug msg="Setting next state" handler=resultNormal
+level=info ts=2024-05-29T13:44:15.239248585Z caller=remote_image_capturer.go:61 user=236643 slug=riuzzang rule_org_id=1 rule_uid=SdWpVPiVk dashboard=9T-8HgG7k panel=2 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+level=debug ts=2024-05-29T13:44:15.238794481Z caller=remote_instance_store.go:51 user=94289 slug=translinegruppegmbh msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=94289 slug=translinegruppegmbh t=2024-05-29T13:44:15.238752382Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.238753676Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.238693758Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.238652526Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=94289 slug=translinegruppegmbh instance="datasource_uid=000000021, ref_id=A" t=2024-05-29T13:44:15.238684406Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.238662103Z caller=remote_image_capturer.go:54 user=236643 slug=riuzzang rule_org_id=1 rule_uid=SdWpVPiVk dashboard=9T-8HgG7k panel=2 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=236643 slug=riuzzang t=2024-05-29T13:44:15.238540046Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-v4monitoring-db, env=pp" t=2024-05-29T13:44:15.236275125Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=191.96.150.3:9998, ip=191.96.150.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.238613058Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.238471204Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=LMA4EMank, ref_id=Varnish \"Error\" log count" t=2024-05-29T13:44:15.238379856Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.238032568Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.237793788Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.237923741Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.237881926Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.237811271Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=191.96.150.2:9998, ip=191.96.150.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newyork435, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.23788223Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.237813623Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.237533025Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=154.16.192.4:9998, ip=154.16.192.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.237616479Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.237495888Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.237270958Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.237320883Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.237034336Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=307381 slug=kambitaskforce version=115 fingerprint=c14a95885f857224 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.237018688Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=LMA4EMank, ref_id=Varnish \"Error\" log count State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.236577107s EvaluationString:}]" duration=104.768758ms
+level=debug ts=2024-05-29T13:44:15.23695775Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.23693124Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=PIA, environment=production, instance=154.16.192.3:9998, ip=154.16.192.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newyork439, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.23691714Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.236809071Z caller=remote_instance_store.go:51 user=264216 slug=auterion msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.236845834Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:15.236756361Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.312452ms
+logger=ngalert.state.manager.persist user=338059 slug=ninetailed t=2024-05-29T13:44:15.236781198Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+logger=ngalert.state.manager user=264216 slug=auterion instance= t=2024-05-29T13:44:15.236700891Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=338059 slug=ninetailed instance="DBInstanceIdentifier=ze5273ca1-postgresql" t=2024-05-29T13:44:15.236715139Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.23667756Z caller=remote_instance_store.go:51 user=285250 slug=quartx msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=285250 slug=quartx instance="__name__=swap_used_percent, instance=prd01.lon.do.quartx.net" t=2024-05-29T13:44:15.236618611Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=264216 slug=auterion t=2024-05-29T13:44:15.236637684Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=338059 slug=ninetailed t=2024-05-29T13:44:15.236595938Z level=debug msg="State manager processing evaluation results" resultCount=3
+logger=ngalert.scheduler user=822121 slug=snapspot t=2024-05-29T13:44:15.236499664Z level=debug msg="Skip rule evaluation because it is paused"
+level=debug ts=2024-05-29T13:44:15.236495476Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=87780 slug=zencloudandhosting t=2024-05-29T13:44:15.236427478Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=74.256963ms
+logger=ngalert.state.manager user=285250 slug=quartx t=2024-05-29T13:44:15.236468944Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.186354912Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=DataPacket, environment=production, instance=37.19.198.91:9998, ip=37.19.198.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.236254654Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.23610455Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.236164555Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.236076024Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=198225 slug=ocadoptclgcp instance="datasource_uid=grafanacloud-prom, ref_id=A,C" t=2024-05-29T13:44:15.236072185Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=506300 slug=jostens instance="instance=owbswjtcw01.jostens.com, job=prometheus" t=2024-05-29T13:44:15.235922362Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager.persist user=390300 slug=astrachain t=2024-05-29T13:44:15.235917936Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=506300 slug=jostens instance="instance=owbswjdypa01.jostens.com, job=prometheus" t=2024-05-29T13:44:15.235777349Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=506300 slug=jostens instance="instance=owbswjdypa01.jostens.com, job=prometheus" t=2024-05-29T13:44:15.235764169Z level=debug msg="Setting next state" handler=resultAlerting
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=DataPacket, environment=production, instance=37.19.198.61:9998, ip=37.19.198.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.235826073Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=512940 slug=gruppoquattroits t=2024-05-29T13:44:15.186386873Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.981135ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=DataPacket, environment=production, instance=37.19.198.61:9998, ip=37.19.198.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.235802174Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=198225 slug=ocadoptclgcp version=35 fingerprint=ff0d2e39480e1138 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.215078352Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A,C State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.214564355s EvaluationString:}]" duration=28.012838ms
+logger=ngalert.state.manager user=390300 slug=astrachain t=2024-05-29T13:44:15.235712006Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.181011311Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=145.31668ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=DataPacket, environment=production, instance=37.19.198.61:9998, ip=37.19.198.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newyork433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.235579219Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=506300 slug=jostens instance="instance=OWBSWGIJD02.gzint.web, job=prometheus" t=2024-05-29T13:44:15.235386001Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=DataPacket, environment=production, instance=37.19.198.177:9998, ip=37.19.198.177, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-newyorkcity.crt, role=vpn, server=newyork447, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.235378577Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=461396 slug=ultimateai t=2024-05-29T13:44:15.235163371Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+logger=ngalert.state.manager user=506300 slug=jostens t=2024-05-29T13:44:15.235127259Z level=debug msg="State manager processing evaluation results" resultCount=6
+logger=ngalert.state.manager user=461396 slug=ultimateai instance="__name__=bot_monitoring_bot_state, app=bot-monitoring, argocd_argoproj_io_instance=production-bot-monitoring, cluster=production, env=production, id=62eb8c0f108d8fca2eaaaa86, instance=172.16.100.131:8080, job=kubernetes-service-endpoints, kubernetes_name=bot-monitoring-prod-service, kubernetes_namespace=ai, kustomize_base=common, maintainers=ultimate.ai, name=Gamesys - UK CDD DPA, owner=ultimate.ai, platform=zendesk, project=bot-monitoring, region=EU, sender=bot" t=2024-05-29T13:44:15.235106085Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=New York, country=United States, datacenter=DataPacket, environment=production, instance=37.19.198.177:9998, ip=37.19.198.177, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newyork447, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.23516733Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=461396 slug=ultimateai instance="__name__=bot_monitoring_bot_state, app=bot-monitoring, argocd_argoproj_io_instance=production-bot-monitoring, cluster=production, env=production, id=62eb8c0f108d8fca2eaaaa86, instance=172.16.100.131:8080, job=kubernetes-service-endpoints, kubernetes_name=bot-monitoring-prod-service, kubernetes_namespace=ai, kustomize_base=common, maintainers=ultimate.ai, name=Gamesys - UK CDD DPA, owner=ultimate.ai, platform=zendesk, project=bot-monitoring, region=EU, sender=bot" t=2024-05-29T13:44:15.234975205Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:15.234840777Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.968975ms
+logger=ngalert.state.manager.persist user=159532 slug=getfabric t=2024-05-29T13:44:15.234891279Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=57.044377ms
+logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:15.233397234Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.067008ms
+level=debug ts=2024-05-29T13:44:15.234517332Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.234522522Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.23395844Z caller=remote_instance_store.go:51 user=324296 slug=timeplus msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.233732699Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.233802081Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.historian backend=loki user=4947 slug=mediamath t=2024-05-29T13:44:15.233746256Z level=debug msg="Done saving alert state history batch"
+level=debug ts=2024-05-29T13:44:15.233665065Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.233632246Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.23358751Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.233578117Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.23342807Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=557231 slug=lnrsusinsuranceprod t=2024-05-29T13:44:15.233303523Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.723753ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.233217879Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=95.181.238.63:9998, ip=95.181.238.63, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=bahamas408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.233097634Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=95.181.238.63:9998, ip=95.181.238.63, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=bahamas408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.233090921Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.233024636Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=95.181.238.63:9998, ip=95.181.238.63, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/bahamas.crt, role=vpn, server=bahamas408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.232966899Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.23284303Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.232776631Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.23282298Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.232742346Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=95.181.238.50:9998, ip=95.181.238.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/bahamas.crt, role=vpn, server=bahamas407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.232728797Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.232712189Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.232659732Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=245291 slug=pismo version=615 fingerprint=a7a4964a0cb9fb53 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.232587411Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.232340615s EvaluationString:}]" duration=541.479581ms
+level=debug ts=2024-05-29T13:44:15.232569801Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=55491 slug=demandbase t=2024-05-29T13:44:15.232575117Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.232533555Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.232513582Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=169420 slug=newspring t=2024-05-29T13:44:15.232292994Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.231997066Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.231905932Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.231903122Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.231828602Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=95.181.238.26:9998, ip=95.181.238.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/bahamas.crt, role=vpn, server=bahamas405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.23186164Z level=debug msg="Setting next state" handler=resultNormal
+Error parsing panelUID for alert annotationruleID1934dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=543318 slug=tech85bb t=2024-05-29T13:44:15.231786368Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.785819ms
+level=debug ts=2024-05-29T13:44:15.231812305Z caller=remote_instance_store.go:51 user=861995 slug=umasalud msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=861995 slug=umasalud instance="resource.label.database_id=uma-v2:uma-backend, resource.label.project_id=uma-v2, resource.label.region=us-central, resource.type=cloudsql_database" t=2024-05-29T13:44:15.23173589Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.231684669Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=861995 slug=umasalud instance="resource.label.database_id=uma-v2:uma-backend, resource.label.project_id=uma-v2, resource.label.region=us-central, resource.type=cloudsql_database" t=2024-05-29T13:44:15.231718634Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=861995 slug=umasalud t=2024-05-29T13:44:15.231643667Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=861995 slug=umasalud version=22 fingerprint=3602acf714627587 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.231525626Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.database_id=uma-v2:uma-backend, resource.label.project_id=uma-v2, resource.label.region=us-central, resource.type=cloudsql_database State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resource.label.database_id=uma-v2:uma-backend, resource.label.project_id=uma-v2, resource.label.region=us-central, resource.type=cloudsql_database Value:0xc021c07d18} C:{Var:C Labels:resource.label.database_id=uma-v2:uma-backend, resource.label.project_id=uma-v2, resource.label.region=us-central, resource.type=cloudsql_database Value:0xc021c07d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.231155902s EvaluationString:[ var='B' labels={resource.label.database_id=uma-v2:uma-backend, resource.label.project_id=uma-v2, resource.label.region=us-central, resource.type=cloudsql_database} value=1 ], [ var='C' labels={resource.label.database_id=uma-v2:uma-backend, resource.label.project_id=uma-v2, resource.label.region=us-central, resource.type=cloudsql_database} value=0 ]}]" duration=112.139499ms
+level=debug ts=2024-05-29T13:44:15.231566697Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.231590456Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=760662 slug=castlepay t=2024-05-29T13:44:15.231453415Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="agent_hostname=env-326717laio1eastus2, cloud_platform=Azure, customer_id=A218, device=/dev/mapper/rootvg-homelv, env_id=326717, env_name=A218 GFS POC, env_type=prod, fstype=xfs, instance=env-326717laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod" t=2024-05-29T13:44:15.231451278Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:45:40Z next_ends_at=2024-05-29T13:46:10Z
+logger=ngalert.state.manager user=760662 slug=castlepay instance="__name__=fly_instance_up, app=castle-api-prod, host=d192, instance=e286070b7e3498, region=iad" t=2024-05-29T13:44:15.231438925Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="agent_hostname=env-326717laio1eastus2, cloud_platform=Azure, customer_id=A218, device=/dev/mapper/rootvg-homelv, env_id=326717, env_name=A218 GFS POC, env_type=prod, fstype=xfs, instance=env-326717laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod" t=2024-05-29T13:44:15.23142858Z level=debug msg="Setting next state" handler=resultAlerting
+level=debug ts=2024-05-29T13:44:15.231408146Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.231416835Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.231301359Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.23133298Z caller=remote_instance_store.go:51 user=332019 slug=liberfly msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.231296224Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.231285802Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.231213796Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=158536 slug=clearsaleantifraude t=2024-05-29T13:44:15.231143075Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.984678ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod" t=2024-05-29T13:44:15.231069594Z level=debug msg="Setting next state" handler=resultAlerting
+level=info ts=2024-05-29T13:44:15.231046924Z caller=remote_alert_sender.go:94 user=308620 slug=sneller host=sneller-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.5.244:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=4Zl5ZtS4z alerts=1
+logger=ngalert.state.historian backend=loki user=4947 slug=mediamath t=2024-05-29T13:44:15.231033883Z level=debug msg="Alert state changed creating annotation" newState=NoData oldState="Pending (Error)"
+level=debug ts=2024-05-29T13:44:15.230831854Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.230734671Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.009578ms
+level=debug ts=2024-05-29T13:44:15.230711544Z caller=remote_instance_store.go:51 user=469068 slug=dvulpe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:15.230645934Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.230684806Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.230596012Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.522843ms
+logger=ngalert.state.manager.persist user=316960 slug=mojamteam t=2024-05-29T13:44:15.230586766Z level=debug msg="Deleting alert states" count=3
+logger=ngalert.state.manager user=316960 slug=mojamteam t=2024-05-29T13:44:15.230566921Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"vvdX_liVk\"],[\"__alert_rule_uid__\",\"bdm7hrgwjrwg0f\"],[\"alertname\",\"PriceAbuse\"],[\"environment\",\"prod\"],[\"grafana_folder\",\"Product - SkinManager\"],[\"itemId\",\"11375\"],[\"project\",\"skinmanager\"],[\"severity\",\"critical\"],[\"userId\",\"4257437\"]]" state=Normal reason=
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.230625859Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=316960 slug=mojamteam t=2024-05-29T13:44:15.230539592Z level=info msg="Detected stale state entry" cacheID="[[\"__alert_rule_namespace_uid__\",\"vvdX_liVk\"],[\"__alert_rule_uid__\",\"bdm7hrgwjrwg0f\"],[\"alertname\",\"PriceAbuse\"],[\"environment\",\"prod\"],[\"grafana_folder\",\"Product - SkinManager\"],[\"itemId\",\"1645\"],[\"project\",\"skinmanager\"],[\"severity\",\"critical\"],[\"userId\",\"3739701\"]]" state=Normal reason=
+logger=ngalert.state.manager user=793400 slug=bedrock instance="datasource_uid=addnq3a8rpxc0b, ref_id=A" t=2024-05-29T13:44:15.230524908Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=793400 slug=bedrock instance="datasource_uid=addnq3a8rpxc0b, ref_id=A" t=2024-05-29T13:44:15.230516655Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.23053554Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.230510445Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=183214 slug=vectorizedio version=43 fingerprint=3310beedefcfae98 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.230480566Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=zsAoBWS4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.230175387s EvaluationString:}]" duration=247.467248ms
+logger=ngalert.state.manager user=793400 slug=bedrock t=2024-05-29T13:44:15.230447937Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.230359329Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.230225319Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.230140552Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=316960 slug=mojamteam instance="itemId=7456, userId=3739701" t=2024-05-29T13:44:15.230113664Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=316960 slug=mojamteam instance="itemId=6077, userId=3420076" t=2024-05-29T13:44:15.230003422Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.229920191Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=316960 slug=mojamteam instance="itemId=5102, userId=3272809" t=2024-05-29T13:44:15.229789415Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.229589133Z caller=remote_instance_store.go:51 user=554823 slug=suller msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=554823 slug=suller instance="host=ehitajate114, sensor=coretemp_package_id_0" t=2024-05-29T13:44:15.229525499Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.22950342Z caller=remote_instance_store.go:51 user=916139 slug=cmtdspd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=TSS / Performive, environment=production, instance=199.36.223.130:9998, ip=199.36.223.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.229349298Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=280499 slug=nomadxyz t=2024-05-29T13:44:15.229152426Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=280499 slug=nomadxyz instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.229140226Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.scheduler user=280499 slug=nomadxyz version=4 fingerprint=277950d36ca13600 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.229038328Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.22875156s EvaluationString:}]" duration=34.045469ms
+level=debug ts=2024-05-29T13:44:15.229046901Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.228992519Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.229008056Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.228734479Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.228674015Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:15.228412557Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.82.58:9998, ip=172.98.82.58, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.228496072Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.228417867Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=info ts=2024-05-29T13:44:15.22835716Z caller=remote_alert_sender.go:94 user=264941 slug=agnosticeng host=agnosticeng-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.192.253:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fq9W6SaVz alerts=1
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.228165578Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.228100463Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.228069069Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.227986081Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.71.66:9998, ip=172.98.71.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.228004942Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.227961414Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.227904288Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.227840934Z caller=remote_instance_store.go:51 user=652809 slug=glassnode msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:15.227786473Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager.persist user=517600 slug=primer t=2024-05-29T13:44:15.227699607Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=35.914459ms
+level=debug ts=2024-05-29T13:44:15.227679646Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:15.227772227Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.227755159Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=111653 slug=theassociationmxp t=2024-05-29T13:44:15.227709863Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.227604953Z caller=remote_instance_store.go:51 user=147806 slug=adevintaengprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=147806 slug=adevintaengprod t=2024-05-29T13:44:15.227547616Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.71.2:9998, ip=172.98.71.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.227566201Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=147806 slug=adevintaengprod instance= t=2024-05-29T13:44:15.227515439Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.227494489Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=871737 slug=happenstance instance="instance=hpn-dev" t=2024-05-29T13:44:15.227367085Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=proxy-0, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.227138665Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:15.227079094Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.664666ms
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.227050192Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=TSS / Performive, environment=production, instance=172.98.71.194:9998, ip=172.98.71.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.227073952Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.227018743Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:15.226873237Z level=debug msg="State manager processing evaluation results" resultCount=4
+logger=ngalert.scheduler user=183214 slug=vectorizedio version=10 fingerprint=933090e32ff32fdb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.226766485Z level=debug msg="Alert rule evaluated" results="[{Instance:pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1 Value:0xc02f8566a0} RES:{Var:RES Labels:pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1 Value:0xc02f8566e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.22636658s EvaluationString:[ var='A' labels={pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1} value=0.006074269612630208 ], [ var='RES' labels={pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=proxy-0, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=proxy-0, serverless_id=int-eu-west-1 Value:0xc02f856770} RES:{Var:RES Labels:pod=proxy-0, serverless_id=int-eu-west-1 Value:0xc02f8567c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.226383212s EvaluationString:[ var='A' labels={pod=proxy-0, serverless_id=int-eu-west-1} value=0.002180457993115594 ], [ var='RES' labels={pod=proxy-0, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=proxy-1, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=proxy-1, serverless_id=int-eu-west-1 Value:0xc02f856850} RES:{Var:RES Labels:pod=proxy-1, serverless_id=int-eu-west-1 Value:0xc02f8568b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.226390043s EvaluationString:[ var='A' labels={pod=proxy-1, serverless_id=int-eu-west-1} value=0.0020606938440601986 ], [ var='RES' labels={pod=proxy-1, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=proxy-2, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=proxy-2, serverless_id=int-eu-west-1 Value:0xc02f856940} RES:{Var:RES Labels:pod=proxy-2, serverless_id=int-eu-west-1 Value:0xc02f856980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.22640229s EvaluationString:[ var='A' labels={pod=proxy-2, serverless_id=int-eu-west-1} value=0.0022576949247518413 ], [ var='RES' labels={pod=proxy-2, serverless_id=int-eu-west-1} value=0 ]}]" duration=48.303214ms
+logger=ngalert.state.manager.persist user=466402 slug=apexfsnzprod t=2024-05-29T13:44:15.226449461Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.807161ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.226477639Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.226363433Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.226231244Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.226256479Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.226173512Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=84.247.105.6:9998, ip=84.247.105.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.226085049Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.225960425Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.226016338Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.225921529Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=212546 slug=modica t=2024-05-29T13:44:15.225880059Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=212546 slug=modica instance= t=2024-05-29T13:44:15.225872085Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=212546 slug=modica instance= t=2024-05-29T13:44:15.225865158Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.225815769Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=84.247.105.6:9998, ip=84.247.105.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.225875705Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=212546 slug=modica version=2 fingerprint=c70b5190c2260cb3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.225782978Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.225502691s EvaluationString:}]" duration=60.913306ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.225810551Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.225752694Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.2257564Z caller=remote_instance_store.go:51 user=491156 slug=tst01wr msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.225681146Z caller=remote_instance_store.go:51 user=412141 slug=sharethrough msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=84.247.105.5:9998, ip=84.247.105.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.225660309Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:15.225563501Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.966732ms
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.225554279Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.607029ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=84.247.105.5:9998, ip=84.247.105.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.225474065Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.225399639Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:15.225322144Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=26.590114ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=84.247.105.3:9998, ip=84.247.105.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.225266035Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.225167218Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.225191545Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager.persist user=609912 slug=wirestock t=2024-05-29T13:44:15.225024603Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=609912 slug=wirestock instance= t=2024-05-29T13:44:15.225011083Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=84.247.105.3:9998, ip=84.247.105.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.225018447Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:15.224955965Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=20177 slug=paddledash instance="DBInstanceIdentifier=paddle-production-adjustment-service-2" t=2024-05-29T13:44:15.224901038Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.224825972Z caller=remote_instance_store.go:51 user=436633 slug=swirldslabsproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.22465657Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.22462341Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.224593926Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.224610519Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.224465758Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:15.224449623Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.530103ms
+level=debug ts=2024-05-29T13:44:15.224375973Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.224409651Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.224395974Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:15.224335978Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:15.224315929Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:15.224322773Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.224219645Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=114492 slug=railsbank version=2 fingerprint=d511ce51cdf9f323 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.224083679Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[A0:{Var:A Labels: Value:} A1:{Var:A Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.22374056s EvaluationString:[ var='A0' metric='NoData' labels={} value=null ], [ var='A1' metric='NoData' labels={} value=null ]}]" duration=418.91228ms
+level=debug ts=2024-05-29T13:44:15.224126631Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.224011272Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=140.228.24.9:9998, ip=140.228.24.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.223997752Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:15.223775692Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=140.228.24.8:9998, ip=140.228.24.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.22378241Z level=debug msg="Setting next state" handler=resultNormal
+level=debug 
ts=2024-05-29T13:44:15.223677732Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.22352396Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.22344928Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=243040 slug=supra89kren t=2024-05-29T13:44:15.22330597Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.799876ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=140.228.24.7:9998, ip=140.228.24.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.223324073Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.223198251Z caller=remote_instance_store.go:51 user=633221 slug=chengtao msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.223272435Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.222952135Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=206107 slug=hydrolix instance="code=499" t=2024-05-29T13:44:15.222929613Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.222867016Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:15.222849899Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager.persist user=416741 slug=despread t=2024-05-29T13:44:15.222781772Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:15.222551195Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=140.228.24.6:9998, ip=140.228.24.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.222627851Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:15.222538759Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, 
instance=140.228.24.6:9998, ip=140.228.24.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ca-montreal.crt, role=vpn, server=montreal428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.222599278Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.222569075Z caller=remote_instance_store.go:51 user=101180 slug=rawnet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod t=2024-05-29T13:44:15.222484533Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.222391117Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.222416719Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=140.228.21.7:9998, ip=140.228.21.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.222390155Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.222274501Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.22230911Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="datasource_uid=d34355cc-e506-450a-95f7-69af012965b9, ref_id=A" t=2024-05-29T13:44:15.222205906Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=612525 slug=adleyeview t=2024-05-29T13:44:15.222169404Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.22204801Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=140.228.21.6:9998, ip=140.228.21.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.221912397Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Montreal, country=Canada, datacenter=PIA, environment=production, instance=140.228.21.5:9998, ip=140.228.21.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=montreal421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.22151485Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.221446086Z level=warn msg="Evaluation result 
contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.220811436Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.220756921Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:15.220698038Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.220632768Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.22059624Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.220468245Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=801848 slug=alitheonprod t=2024-05-29T13:44:15.220376983Z level=debug msg="Saving alert states" count=4 max_state_save_concurrency=1 + logger=ngalert.state.manager user=801848 slug=alitheonprod instance="LoadBalancer=app/k8s-relink-relinkex-9ded1bc2cd/63e62ece046baa19" t=2024-05-29T13:44:15.220321333Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=801848 slug=alitheonprod instance="LoadBalancer=app/k8s-relink-relinkex-9ded1bc2cd/63e62ece046baa19" t=2024-05-29T13:44:15.220311853Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=801848 slug=alitheonprod instance="LoadBalancer=app/k8s-marini-mariniex-c460268eb4/0375b9e0c5e80988" t=2024-05-29T13:44:15.220280662Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.220361809Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.220269422Z caller=remote_alert_sender.go:94 user=770491 slug=reflex host=reflex-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.11.3.83:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=be0468e5-ae32-48f6-8b79-22e7d09771bd alerts=1 + logger=ngalert.state.manager user=801848 slug=alitheonprod instance="LoadBalancer=app/k8s-marini-mariniex-c460268eb4/0375b9e0c5e80988" t=2024-05-29T13:44:15.220246932Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.220319999Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.220306179Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.22018839Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.220071244Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.220053964Z caller=remote_alert_sender.go:94 user=285766 slug=payhub host=payhub-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.17.89:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=AxF4pULnz alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.219982386Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Monaco, country=Monaco, datacenter=M247, environment=production, instance=95.181.233.14:9998, ip=95.181.233.14, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=monaco404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.219838404Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Monaco, country=Monaco, datacenter=M247, environment=production, instance=95.181.233.14:9998, ip=95.181.233.14, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=monaco404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.219817036Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.219756807Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=849903 slug=superlook t=2024-05-29T13:44:15.219382427Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.340974ms + logger=ngalert.state.manager.persist user=407315 slug=ppcp t=2024-05-29T13:44:15.219139663Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=31.6274ms + logger=ngalert.state.manager.persist user=495005 slug=idealscorp t=2024-05-29T13:44:15.219096989Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.623685ms + level=debug ts=2024-05-29T13:44:15.219004042Z caller=remote_instance_store.go:51 user=326888 slug=buildingblocks msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.218991794Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.219061479Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=770491 slug=reflex t=2024-05-29T13:44:15.218916488Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.813811ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.218937663Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.21881682Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.218803356Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.21874101Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.21876242Z caller=remote_instance_store.go:51 user=442934 slug=arqit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.218734143Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.218685616Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.218703354Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.218654107Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=361282 slug=turing t=2024-05-29T13:44:15.218271733Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=43.782239ms + level=debug ts=2024-05-29T13:44:15.218167926Z caller=remote_instance_store.go:51 user=281086 slug=zunigamanuel msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.21813787Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.218099342Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.218054247Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.218071898Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.217994176Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Milano, country=Italy, datacenter=DataPacket, environment=production, instance=156.146.41.1:9998, ip=156.146.41.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/italy.crt, role=vpn, server=milano402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.217789323Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.217604008Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Milano, country=Italy, datacenter=DataPacket, environment=production, instance=156.146.41.193:9998, ip=156.146.41.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=milano404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.217417879Z level=debug msg="Keeping 
state" state=Normal + level=debug ts=2024-05-29T13:44:15.217350761Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.21722865Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.217200023Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.217058228Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=384488 slug=g3rv4 t=2024-05-29T13:44:15.216926187Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=57.360294ms + level=debug ts=2024-05-29T13:44:15.216703961Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.216690635Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.216652126Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.216601694Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Milano, country=Italy, datacenter=DataPacket, environment=production, instance=138.199.54.49:9998, ip=138.199.54.49, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=milano406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.216663581Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.216555537Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.216487819Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.216387646Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.216429994Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=98.935751ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.216404164Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Milano, country=Italy, datacenter=DataPacket, environment=production, instance=138.199.54.49:9998, ip=138.199.54.49, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/italy-2.crt, role=streaming-optimized, server=milano406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.216136388Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.21599978Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.215813428Z caller=remote_instance_store.go:51 user=916139 slug=cmtdspd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=528849 slug=bitvavo t=2024-05-29T13:44:15.215753833Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=528849 slug=bitvavo instance= t=2024-05-29T13:44:15.215721602Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=528849 slug=bitvavo version=2 fingerprint=403fbd8bf5651d3e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.21561078Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.21531191s EvaluationString:}]" duration=15.976825ms + level=debug ts=2024-05-29T13:44:15.215636025Z caller=remote_instance_store.go:51 user=194293 slug=enterpret msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=194293 slug=enterpret t=2024-05-29T13:44:15.215601934Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=194293 slug=enterpret instance= t=2024-05-29T13:44:15.215586895Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=830631 slug=api3 t=2024-05-29T13:44:15.215528976Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=20.932216ms + logger=ngalert.state.manager user=194293 slug=enterpret t=2024-05-29T13:44:15.21556481Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=472647 slug=planet instance= t=2024-05-29T13:44:15.215455315Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=472647 slug=planet version=5 fingerprint=e066d5b1948a13bc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.215342466Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc035a8a2b0} C:{Var:C Labels: Value:0xc035a8a2b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.215020076s EvaluationString:[ var='B' labels={} value=60 ], [ var='C' labels={} value=0 ]}]" duration=328.371964ms + logger=ngalert.state.manager.persist user=667326 slug=lakovna t=2024-05-29T13:44:15.215127797Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.511269ms + level=debug ts=2024-05-29T13:44:15.215073511Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.153.8:9998, ip=102.129.153.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-florida.crt, role=vpn, server=miami429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.215136691Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.214756041Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=466402 slug=apexfsnzprod instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.214581938Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.214609636Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.21460531Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.214543329Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.214480108Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.214487152Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=191103 slug=amazonadmin version=66 fingerprint=660d6995dbc9c68e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.214457091Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.214223523s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=482.860206ms + level=debug ts=2024-05-29T13:44:15.214447406Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.153.7:9998, ip=102.129.153.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-florida.crt, role=vpn, server=miami428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.214420113Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.214244795Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.213886136Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.213792651Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=sadevelop, uri=/rpc/com.asapp.schemas.domain.edge.conversation.v1.services.Conversation/UpsertConversation" t=2024-05-29T13:44:15.213754124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=544997 slug=cloudbuilders t=2024-05-29T13:44:15.213740642Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=sadevelop, uri=/rpc/com.asapp.schemas.domain.edge.conversation.v1.services.Conversation/UpsertConversation" t=2024-05-29T13:44:15.213742248Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.213471527Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.009259ms + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=att, uri=/rpc/com.asapp.schemas.domain.edge.conversation.v1.services.Conversation/AddMessage" t=2024-05-29T13:44:15.213475959Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.213358881Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:15.213344368Z level=debug msg="State manager processing evaluation results" resultCount=3 + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.213316037Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:15.21329186Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.213232038Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:15.213229925Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.212952492Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.213088553Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.21286735Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.212899752Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.212698199Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.153.5:9998, ip=102.129.153.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=miami426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.212695592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=456946 slug=menlosecurityredge instance="AutoScalingGroupName=redge-gw-0-aps1-az3" t=2024-05-29T13:44:15.212648346Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.212631336Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=425857 slug=cialfo t=2024-05-29T13:44:15.212524178Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.568145ms + logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:15.212573861Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=396586 slug=opengov t=2024-05-29T13:44:15.212557218Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=396586 slug=opengov version=323 fingerprint=77eba969204e1bcf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.212471359Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[C:{Var:C Labels: Value:} D:{Var:D Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.211699525s EvaluationString:[ var='C' labels={} value=null ], [ var='D' labels={} value=null ]}]" duration=198.132041ms + logger=ngalert.state.manager user=456946 slug=menlosecurityredge t=2024-05-29T13:44:15.212454419Z level=debug msg="State manager processing evaluation results" resultCount=3 + logger=ngalert.scheduler user=456946 slug=menlosecurityredge version=1 fingerprint=52c4a73eda3ed568 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.212339032Z level=debug msg="Alert rule evaluated" results="[{Instance:AutoScalingGroupName=redge-gw-0-aps1-az1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AutoScalingGroupName=redge-gw-0-aps1-az1 Value:0xc02415fee8} C:{Var:C Labels:AutoScalingGroupName=redge-gw-0-aps1-az1 Value:0xc02415fef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.212020664s EvaluationString:[ var='B' labels={AutoScalingGroupName=redge-gw-0-aps1-az1} value=2 ], [ var='C' labels={AutoScalingGroupName=redge-gw-0-aps1-az1} value=0 ]} {Instance:AutoScalingGroupName=redge-gw-0-aps1-az2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AutoScalingGroupName=redge-gw-0-aps1-az2 Value:0xc02415ff00} C:{Var:C Labels:AutoScalingGroupName=redge-gw-0-aps1-az2 Value:0xc02415ff08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.212031485s EvaluationString:[ var='B' labels={AutoScalingGroupName=redge-gw-0-aps1-az2} value=2 ], [ var='C' 
labels={AutoScalingGroupName=redge-gw-0-aps1-az2} value=0 ]} {Instance:AutoScalingGroupName=redge-gw-0-aps1-az3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AutoScalingGroupName=redge-gw-0-aps1-az3 Value:0xc02415ff18} C:{Var:C Labels:AutoScalingGroupName=redge-gw-0-aps1-az3 Value:0xc02415ff20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.212038223s EvaluationString:[ var='B' labels={AutoScalingGroupName=redge-gw-0-aps1-az3} value=2 ], [ var='C' labels={AutoScalingGroupName=redge-gw-0-aps1-az3} value=0 ]}]" duration=1.046593796s + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.153.4:9998, ip=102.129.153.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-florida.crt, role=vpn, server=miami425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.212394431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.153.4:9998, ip=102.129.153.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=miami425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.212135668Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.21204693Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.153.3:9998, ip=102.129.153.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=miami424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.211663523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.211597218Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=b3fac6629d1ee678 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.211258999Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=RBZj4Ak4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.210808913s EvaluationString:}]" duration=11.968754ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.211134318Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.211063351Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.152.7:9998, ip=102.129.152.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-florida.crt, role=vpn, server=miami422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.21101122Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=JAPAN Query" t=2024-05-29T13:44:15.211003635Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.210956582Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.21096544Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.210912183Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.210906228Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.210763714Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.210673808Z caller=remote_alert_sender.go:94 user=194539 slug=sharris host=sharris-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.103.59:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=d867b55a-d148-409f-a6f0-9d22e178871a alerts=1 + logger=ngalert.state.manager.persist user=326874 slug=fastpath t=2024-05-29T13:44:15.210564306Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.578984ms + level=debug ts=2024-05-29T13:44:15.210460872Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.210265181Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.210186599Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.152.5:9998, ip=102.129.152.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-florida.crt, role=vpn, server=miami420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.210214051Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="datasource_uid=grafanacloud-prom, ref_id=alert_timer_features" t=2024-05-29T13:44:15.210128964Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:15.21002244Z 
caller=remote_instance_store.go:51 user=543318 slug=tech85bb msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=543318 slug=tech85bb t=2024-05-29T13:44:15.20999524Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=info ts=2024-05-29T13:44:15.209935524Z caller=remote_alert_sender.go:94 user=656459 slug=activeport host=activeport-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.20.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e386d579-4821-44c5-97d1-9e526c036306 alerts=1 + level=debug ts=2024-05-29T13:44:15.209917652Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543318 slug=tech85bb t=2024-05-29T13:44:15.209931538Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.152.5:9998, ip=102.129.152.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=miami420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.209979695Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.209941133Z caller=remote_instance_store.go:51 user=237629 slug=ocrolus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.209921634Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.209922827Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.209846404Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.209910855Z caller=remote_instance_store.go:51 user=496884 slug=wbx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=496884 slug=wbx instance="datasource_uid=grafanacloud-prom, ref_id=used" t=2024-05-29T13:44:15.209851298Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=496884 slug=wbx instance="datasource_uid=grafanacloud-prom, ref_id=used" t=2024-05-29T13:44:15.209772282Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=496884 slug=wbx instance="datasource_uid=grafanacloud-prom, ref_id=used" t=2024-05-29T13:44:15.20973591Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=496884 slug=wbx instance="datasource_uid=grafanacloud-prom, ref_id=used" t=2024-05-29T13:44:15.209705159Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.209554361Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.209319281Z caller=remote_instance_store.go:51 user=332031 slug=lexisnexisemailage msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=332031 slug=lexisnexisemailage t=2024-05-29T13:44:15.209268392Z level=debug msg="Saving alert states" count=5 max_state_save_concurrency=1 + 
+ logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:15.209192793Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=332031 slug=lexisnexisemailage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.209207636Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=332031 slug=lexisnexisemailage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.209167431Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=332031 slug=lexisnexisemailage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.2091574Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=332031 slug=lexisnexisemailage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.20914796Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=332031 slug=lexisnexisemailage t=2024-05-29T13:44:15.209088738Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=332031 slug=lexisnexisemailage version=7 fingerprint=556c617aa4f4837f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.209012264Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.20867728s EvaluationString:}]" duration=81.180939ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.152.3:9998, ip=102.129.152.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=miami418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.209104286Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=849903 slug=superlook t=2024-05-29T13:44:15.209039255Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.208885755Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.208617635Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=PIA, environment=production, instance=102.129.152.2:9998, ip=102.129.152.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=miami417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.208677425Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.208552841Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:15.208481571Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.160085ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.208401022Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0fbe610715e80b446:8080" t=2024-05-29T13:44:15.208425402Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0ed46c86edc47fbfb:8080" t=2024-05-29T13:44:15.208297472Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.208213303Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0e64bc78917066c68:8080" t=2024-05-29T13:44:15.208078779Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Miami, country=United States, datacenter=DataPacket, environment=production, instance=143.244.34.129:9998, ip=143.244.34.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-florida.crt, role=vpn, server=miami404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.208078258Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.208035648Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0de4361b4694a7a80:8080" t=2024-05-29T13:44:15.207945528Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0de4361b4694a7a80:8080" t=2024-05-29T13:44:15.207936128Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0de1a3e73dedec641:8080" t=2024-05-29T13:44:15.207858222Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0db82140b799bc612:8080" t=2024-05-29T13:44:15.207737152Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0da94adab2768f776:8080" t=2024-05-29T13:44:15.207704293Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.90:9998, ip=77.81.142.90, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.207704385Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.90:9998, ip=77.81.142.90, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.207692052Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=667326 slug=lakovna instance= t=2024-05-29T13:44:15.207602038Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.20764812Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=xpp5HraMk, ref_id=A" t=2024-05-29T13:44:15.207587456Z level=debug msg="Changing state" previous_state=Pending next_state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0da18406ddfcf9672:8080" t=2024-05-29T13:44:15.207638175Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.207559911Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=667326 slug=lakovna instance= t=2024-05-29T13:44:15.207591238Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0d693f6d27e183f83:8080" t=2024-05-29T13:44:15.20757005Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.207524151Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=667326 slug=lakovna version=39 fingerprint=a0a949df96c875bf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.207475766Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc05094f3d0} C:{Var:C Labels: Value:0xc05094f3d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.206130833s EvaluationString:[ var='B' labels={} value=47.14635467529297 ], [ var='C' labels={} value=0 ]}]" duration=41.296284ms
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0d2eef9bfe7027239:8080" t=2024-05-29T13:44:15.207512888Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.207429163Z caller=remote_instance_store.go:51 user=652809 slug=glassnode msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.207452617Z caller=remote_instance_store.go:51 user=412141 slug=sharethrough msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=412141 slug=sharethrough t=2024-05-29T13:44:15.207408129Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.75:9998, ip=77.81.142.75, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.207369348Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0c746cae2e04002d0:8080" t=2024-05-29T13:44:15.207330446Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.207327196Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0bed052e56b34d529:8080" t=2024-05-29T13:44:15.207291704Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.75:9998, ip=77.81.142.75, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mexico.crt, role=vpn, server=mexico408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.20718361Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:15.207163757Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="env=prd, host=ip-10-20-0-136, instance=ip-10-20-0-136, metric_type=gauge, otel=1, region=us-east-1, service=drupal, type=web" t=2024-05-29T13:44:15.207080446Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.207050627Z caller=remote_image_capturer.go:33 user=770491 slug=reflex rule_org_id=1 rule_uid=be0468e5-ae32-48f6-8b79-22e7d09771bd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0b9eb791f52447959:8080" t=2024-05-29T13:44:15.207103043Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0b9eb791f52447959:8080" t=2024-05-29T13:44:15.207092724Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=770491 slug=reflex instance="app=fly-log-shipper-dev, component_id=better_stack, component_kind=sink, component_name=better_stack, component_type=http, host=2b56, instance=91852ed5a2e483, region=lax, status=202" t=2024-05-29T13:44:15.207016706Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ logger=ngalert.state.manager user=770491 slug=reflex instance="app=fly-log-shipper-dev, component_id=better_stack, component_kind=sink, component_name=better_stack, component_type=http, host=2b56, instance=91852ed5a2e483, region=lax, status=202" t=2024-05-29T13:44:15.206990316Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.206960264Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.20694764Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-vague-stoa"
+ logger=ngalert.scheduler user=806229 slug=simplisafe version=17 fingerprint=640bb3a702672393 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.206837172Z level=debug msg="Alert rule evaluated" results="[{Instance:env=prd, host=ip-10-20-0-136, instance=ip-10-20-0-136, metric_type=gauge, otel=1, region=us-east-1, service=drupal, type=web State:Normal Error: Results:map[] Values:map[Alert:{Var:Alert Labels:env=prd, host=ip-10-20-0-136, instance=ip-10-20-0-136, metric_type=gauge, otel=1, region=us-east-1, service=drupal, type=web Value:0xc010cdfc38} CallCenterClosed:{Var:CallCenterClosed Labels:env=prd, host=ip-10-20-0-136, instance=ip-10-20-0-136, metric_type=gauge, otel=1, region=us-east-1, service=drupal, type=web Value:0xc010cdfcd8} EligibilityRate:{Var:EligibilityRate Labels: Value:0xc010cdfd00} EligibilityRateRaw:{Var:EligibilityRateRaw Labels: Value:0xc010cdfd08} Eligible:{Var:Eligible Labels: Value:0xc010cdfd20} Ineligible:{Var:Ineligible Labels: Value:0xc010cdfd28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.206289182s EvaluationString:[ var='Alert' labels={env=prd, host=ip-10-20-0-136, instance=ip-10-20-0-136, metric_type=gauge, otel=1, region=us-east-1, service=drupal, type=web} value=0 ], [ var='CallCenterClosed' labels={env=prd, host=ip-10-20-0-136, instance=ip-10-20-0-136, metric_type=gauge, otel=1, region=us-east-1, service=drupal, type=web} value=0 ], [ var='EligibilityRate' labels={} value=16.279069767441865 ], [ var='EligibilityRateRaw' labels={} value=16.279069767441865 ], [ var='Eligible' labels={} value=7.0692090395480225 ], [ var='Ineligible' labels={} value=36.35593220338983 ]}]" duration=64.305877ms
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-tops-seasnail-ceddcs" t=2024-05-29T13:44:15.206872267Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-testy-cow" t=2024-05-29T13:44:15.206802679Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0ac4abb71f4c40b1c:8080" t=2024-05-29T13:44:15.206879629Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-testy-cow" t=2024-05-29T13:44:15.206796814Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.206823173Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.206776477Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-testy-cow" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0a7bdbeed7a7c99d8:8080" t=2024-05-29T13:44:15.206818251Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0a7bdbeed7a7c99d8:8080" t=2024-05-29T13:44:15.2068103Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.206743724Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.206701895Z caller=remote_instance_store.go:51 user=771123 slug=dvflemmli msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.206693379Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-09db1e9715322899a:8080" t=2024-05-29T13:44:15.206700366Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-sts" t=2024-05-29T13:44:15.206641114Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-sts" t=2024-05-29T13:44:15.206634815Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:15.20663238Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=771123 slug=dvflemmli t=2024-05-29T13:44:15.206632664Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-09d369352b20bdaaa:8080" t=2024-05-29T13:44:15.206644407Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=771123 slug=dvflemmli t=2024-05-29T13:44:15.206561472Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-098372ad716df0483:8080" t=2024-05-29T13:44:15.206581317Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-098372ad716df0483:8080" t=2024-05-29T13:44:15.20657187Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.206365112Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.206511893Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-sincere-hawk-fpadbz" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-sincere-asp-tyypgm" t=2024-05-29T13:44:15.20645811Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0931a11e75d1314cc:8080" t=2024-05-29T13:44:15.206511346Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID550dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=344017 slug=descript t=2024-05-29T13:44:15.206323468Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:15.206305793Z caller=remote_instance_store.go:51 user=375798 slug=beeworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-rigid-star" t=2024-05-29T13:44:15.20636889Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-08f2be4b37ff78981:8080" t=2024-05-29T13:44:15.206394767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-rigid-star" t=2024-05-29T13:44:15.206362059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=351895 slug=abacusworks t=2024-05-29T13:44:15.206256861Z level=debug msg="Saving alert states done" count=90 max_state_save_concurrency=1 duration=1.33181745s + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:15.206216957Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.932288ms + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0853bf8220b140514:8080" t=2024-05-29T13:44:15.206261248Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-07ce844529a7f5af7:8080" t=2024-05-29T13:44:15.206198144Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-07ce32697357c3a3e:8080" t=2024-05-29T13:44:15.206131953Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.206112478Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=00908211e29fdf72 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.206001261Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=sAAhZ0a7z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.205628406s EvaluationString:}]" duration=2.029400038s + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-07ba54cbc4d41e7ef:8080" t=2024-05-29T13:44:15.206077448Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=152115 slug=mediakindsaasgcp t=2024-05-29T13:44:15.205986029Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=42.170498ms + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-07ba54cbc4d41e7ef:8080" 
t=2024-05-29T13:44:15.206065813Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.205980683Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-077055052a0ba3ec8:8080" t=2024-05-29T13:44:15.206010344Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-077055052a0ba3ec8:8080" t=2024-05-29T13:44:15.206000639Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-072e4bce9a47b76aa:8080" t=2024-05-29T13:44:15.205948987Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=289650 slug=eurostar instance= t=2024-05-29T13:44:15.205901209Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-072e4bce9a47b76aa:8080" t=2024-05-29T13:44:15.205914605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-quality-bedbug-1usiqf" t=2024-05-29T13:44:15.20586631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-06fbf087a39a8f233:8080" t=2024-05-29T13:44:15.205856019Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.45:9998, ip=77.81.142.45, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.205813215Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0655fbf2a6b9b0763:8080" t=2024-05-29T13:44:15.205727275Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7" t=2024-05-29T13:44:15.205695887Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7" t=2024-05-29T13:44:15.205688569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.205666374Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0604d509c57994492:8080" t=2024-05-29T13:44:15.205650244Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.204051202Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-05dd7f0cc4d23f482:8080" t=2024-05-29T13:44:15.205520908Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-optimum-worm-07md7q" t=2024-05-29T13:44:15.20550913Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-05a791ede82571145:8080" t=2024-05-29T13:44:15.205470229Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.205095846Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-05a791ede82571145:8080" t=2024-05-29T13:44:15.205460342Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0595b744302c2d1fa:8080" t=2024-05-29T13:44:15.205405422Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-maximum-mako-jr1uj2" t=2024-05-29T13:44:15.205337767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.205317151Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-maximum-mako-jr1uj2" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-05619d678da00276b:8080" t=2024-05-29T13:44:15.205276026Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.205120405Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.205084659Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.205013845Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-040fbe61a5008c37f:8080" t=2024-05-29T13:44:15.20508354Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx" t=2024-05-29T13:44:15.205028854Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.3:9998, ip=77.81.142.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mexico.crt, role=vpn, server=mexico403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.205047328Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.205001411Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx" + level=debug ts=2024-05-29T13:44:15.204936341Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.204903143Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-intimate-chamois-tx9zvw" + level=debug ts=2024-05-29T13:44:15.204859486Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-inland-mar" t=2024-05-29T13:44:15.204851286Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-inland-mar" t=2024-05-29T13:44:15.204842893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-02ecc5b2b5e3d0f51:8080" t=2024-05-29T13:44:15.204838683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-02ca76dbd52fdd31d:8080" t=2024-05-29T13:44:15.204787528Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:15.204680367Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj" t=2024-05-29T13:44:15.20475392Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.204686206Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-02a9be6ea76e76716:8080" t=2024-05-29T13:44:15.204719046Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-024ad97aa50fefad3:8080" t=2024-05-29T13:44:15.204657152Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut" t=2024-05-29T13:44:15.204651899Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.30:9998, ip=77.81.142.30, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mexico.crt, role=vpn, server=mexico405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.204586443Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-handy-jawfish-v44gkd" t=2024-05-29T13:44:15.20456543Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.204528394Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-01ef4c19a850eb395:8080" t=2024-05-29T13:44:15.20454824Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-fast-chicken-6u1a39" t=2024-05-29T13:44:15.204498827Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.204466521Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-fast-chicken-6u1a39" +level=debug ts=2024-05-29T13:44:15.204420581Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=unseen-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent98E8C" t=2024-05-29T13:44:15.204420414Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0172c7194a6c31817:8080" t=2024-05-29T13:44:15.204383678Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-0104d127702d59cee:8080" t=2024-05-29T13:44:15.204337116Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-009a334b97c3a1d28:8080" t=2024-05-29T13:44:15.204300998Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-008561f7199c35ad8:8080" t=2024-05-29T13:44:15.204245384Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.204166166Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-006ac7cf43f42565e:8080" t=2024-05-29T13:44:15.204165844Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=treehouse-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentDDCF8" t=2024-05-29T13:44:15.204137539Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-enhanced-bedbug-v1vdsc" t=2024-05-29T13:44:15.204149637Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.204121283Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-004e425a0c86a5568:8080" t=2024-05-29T13:44:15.204096367Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-devoted-mackerel-av9yz5" t=2024-05-29T13:44:15.204048133Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=80938 slug=fispan instance="instance=i-001492827b9aef935:8080" t=2024-05-29T13:44:15.204012518Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager 
user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-definite-gnat-7k4t60" t=2024-05-29T13:44:15.203937942Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:15.203869852Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=treehouse-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent9C98F" t=2024-05-29T13:44:15.203893497Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:15.203853411Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +level=debug ts=2024-05-29T13:44:15.203801755Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.248:9998, ip=77.81.142.248, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico414, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.203740278Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.203701807Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-azure" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-advanced-heron-87ad3m" t=2024-05-29T13:44:15.203539839Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=statuspro-justice-prod, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent60739" t=2024-05-29T13:44:15.20346981Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse" t=2024-05-29T13:44:15.203445078Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.203431313Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-star-shrew-x7jynq" t=2024-05-29T13:44:15.203365011Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.203337152Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-star-shrew-x7jynq" +level=debug ts=2024-05-29T13:44:15.203298191Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.203288933Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.218002ms +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-similarweb" t=2024-05-29T13:44:15.203278757Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.203219332Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-ruling-poodle-iwf8zg" t=2024-05-29T13:44:15.203179835Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.241:9998, ip=77.81.142.241, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico413, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.20313103Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.203065954Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.203072836Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.202998425Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-precious-akita-yb6fqc" t=2024-05-29T13:44:15.203063147Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb" t=2024-05-29T13:44:15.202965281Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.202946763Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-mighty-iguana-iz3nkf" t=2024-05-29T13:44:15.202894807Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.202783732Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-living-gelding-664vr0" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-innocent-sunbeam-uy8k35" t=2024-05-29T13:44:15.202706171Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-immense-husky-hz3ybb" t=2024-05-29T13:44:15.202624283Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.20264737Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.229:9998, ip=77.81.142.229, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.202630955Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.202599435Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-helpful-boxer-x10u0j" t=2024-05-29T13:44:15.202519978Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.229:9998, ip=77.81.142.229, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mexico.crt, role=vpn, server=mexico402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.202379963Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.202320926Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-fine-scorpion-z46p3a" t=2024-05-29T13:44:15.202296688Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.202250747Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-fine-scorpion-z46p3a" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-eternal-turkey-h7y8d4" t=2024-05-29T13:44:15.202177657Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.118:9998, ip=77.81.142.118, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico411, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.202155056Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.118:9998, ip=77.81.142.118, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico411, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.202138654Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.202108273Z caller=remote_instance_store.go:51 user=415315 slug=waveloprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.202011321Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=624354 slug=truliooworkflow t=2024-05-29T13:44:15.201983098Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-enigma" t=2024-05-29T13:44:15.201989157Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.201929497Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-enigma" t=2024-05-29T13:44:15.201982992Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.201903325Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.201893079Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-driving-albacore-f6p007" t=2024-05-29T13:44:15.201908446Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-driving-albacore-f6p007" t=2024-05-29T13:44:15.201897914Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.20187533Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-driving-albacore-f6p007" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-divine-jackass-69gfky" t=2024-05-29T13:44:15.201818845Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.118:9998, ip=77.81.142.118, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mexico.crt, role=vpn, server=mexico411, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.201830234Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-divine-jackass-69gfky" t=2024-05-29T13:44:15.201807226Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.201767608Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.201766959Z caller=remote_instance_store.go:51 user=385107 slug=qubit9 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.201750499Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager.persist user=28776 slug=flotechnologies t=2024-05-29T13:44:15.201635938Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.719709ms +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.201667182Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-custom1" +logger=ngalert.state.manager user=415315 slug=waveloprod instance="__name__=redis_evicted_keys_total, app=opentelemetry, az=undefined, cloud_provider=openstack, customer_name=ting, env=pre, instance=10.177.43.196:9121, job=redis_metrics, lob=wavelo, node=redis003.pre-bluebox.cnco.tucows.systems, platform=isos, project=isos, region=cnco2, team=isos, tenant=pre_bluebox" t=2024-05-29T13:44:15.201624053Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=243040 slug=supra89kren instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.20146471Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-companionprofessional" t=2024-05-29T13:44:15.201461595Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.105:9998, ip=77.81.142.105, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mexico410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.20135427Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.201215843Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-bursting-hen-xau574" t=2024-05-29T13:44:15.201238515Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Mexico, country=Mexico, datacenter=M247, environment=production, instance=77.81.142.105:9998, ip=77.81.142.105, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/mexico.crt, role=vpn, server=mexico410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.201109152Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=415315 slug=waveloprod instance="__name__=redis_evicted_keys_total, app=opentelemetry, az=undefined, cloud_provider=openstack, customer_name=ting, env=pre, instance=10.177.43.159:9121, job=redis_metrics, lob=wavelo, node=redis002.pre-bluebox.cnco.tucows.systems, platform=isos, project=isos, region=cnco2, team=isos, tenant=pre_bluebox" t=2024-05-29T13:44:15.201045846Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.200834528Z caller=remote_instance_store.go:51 user=211268 slug=vippstest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-arm-dev" t=2024-05-29T13:44:15.20101584Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=ServersAustralia, environment=production, instance=181.214.199.98:9998, ip=181.214.199.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=melbourne414, server_type=10G, service_name=cpz_vpn" 
t=2024-05-29T13:44:15.200913459Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.200856238Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.200813233Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-arm" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentE0437" t=2024-05-29T13:44:15.20081921Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-apex" t=2024-05-29T13:44:15.20075566Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.20076346Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.200709631Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.200705531Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-apex" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-allowed-frog-819x4t" t=2024-05-29T13:44:15.20062953Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.200723386Z caller=remote_instance_store.go:51 user=194293 slug=enterpret msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=194293 slug=enterpret t=2024-05-29T13:44:15.200681006Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=194293 slug=enterpret instance= t=2024-05-29T13:44:15.200657638Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=194293 slug=enterpret t=2024-05-29T13:44:15.20062768Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=211268 slug=vippstest version=2 fingerprint=a8a458d307fc1207 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.200456077Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=2.919182ms +level=debug ts=2024-05-29T13:44:15.200508046Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.200480642Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-aidn" +level=error ts=2024-05-29T13:44:15.20042322Z caller=remote_rule_evaluator.go:110 user=211268 slug=vippstest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-barak" t=2024-05-29T13:44:15.200417022Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.200305941Z caller=remote_instance_store.go:51 user=916139 slug=cmtdspd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=control-plane-v2" t=2024-05-29T13:44:15.200296872Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:15.200257708Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=control-plane-v2" +level=debug ts=2024-05-29T13:44:15.200200722Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=285766 slug=payhub instance= t=2024-05-29T13:44:15.200093679Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:59:10Z next_ends_at=2024-05-29T14:04:10Z +logger=ngalert.state.manager user=285766 slug=payhub instance= t=2024-05-29T13:44:15.200064443Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentD372F" t=2024-05-29T13:44:15.20014094Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=285766 slug=payhub t=2024-05-29T13:44:15.200036765Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.200105557Z caller=remote_image_capturer.go:33 user=285766 slug=payhub rule_org_id=1 rule_uid=AxF4pULnz msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" +logger=ngalert.state.manager user=806229 slug=simplisafe instance="__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-4-13.us-west-2.compute.internal, instance=ip-10-91-4-13.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER" t=2024-05-29T13:44:15.200063858Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=312340 slug=lakefs version=47 fingerprint=e3bb5544c4e37e0b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.198982425Z level=debug msg="Alert rule evaluated" results="[{Instance:TableName=control-plane State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=control-plane Value:0xc01df5e480} C:{Var:C Labels:TableName=control-plane Value:0xc01df5e488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.196930593s EvaluationString:[ var='B' labels={TableName=control-plane} value=0.14615384615384616 ], [ var='C' labels={TableName=control-plane} value=0 ]} {Instance:TableName=control-plane-v2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=control-plane-v2 Value:0xc01df5e618} C:{Var:C Labels:TableName=control-plane-v2 Value:0xc01df5e610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.196945639s EvaluationString:[ 
var='B' labels={TableName=control-plane-v2} value=0 ], [ var='C' labels={TableName=control-plane-v2} value=0 ]} {Instance:TableName=lakefs-barak State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-barak Value:0xc01df5e690} C:{Var:C Labels:TableName=lakefs-barak Value:0xc01df5e698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19694926s EvaluationString:[ var='B' labels={TableName=lakefs-barak} value=0 ], [ var='C' labels={TableName=lakefs-barak} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-aidn State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-aidn Value:0xc01df5e7a0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-aidn Value:0xc01df5e7a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.196952286s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-aidn} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-aidn} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-allowed-frog-819x4t State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-allowed-frog-819x4t Value:0xc01df5e7f0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-allowed-frog-819x4t Value:0xc01df5e7f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.196956154s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-allowed-frog-819x4t} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-allowed-frog-819x4t} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-apex State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-apex Value:0xc01df5e9b0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-apex Value:0xc01df5e9b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19699982s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-apex} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-apex} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-arm State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-arm Value:0xc01df5eb00} C:{Var:C Labels:TableName=lakefs-cloud-refstore-arm Value:0xc01df5eb08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197003542s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-arm} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-arm} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-arm-dev State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-arm-dev Value:0xc01df5eb40} C:{Var:C Labels:TableName=lakefs-cloud-refstore-arm-dev Value:0xc01df5eb48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197006776s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-arm-dev} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-arm-dev} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-bursting-hen-xau574 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-bursting-hen-xau574 Value:0xc01df5eba0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-bursting-hen-xau574 Value:0xc01df5eba8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197011236s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-bursting-hen-xau574} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-bursting-hen-xau574} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-close-sunbird-fhwmpw State:Normal Error: Results:map[] 
Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-close-sunbird-fhwmpw Value:0xc01df5ebf0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-close-sunbird-fhwmpw Value:0xc01df5ebf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197014616s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-close-sunbird-fhwmpw} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-close-sunbird-fhwmpw} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-companionprofessional State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-companionprofessional Value:0xc01df5ec30} C:{Var:C Labels:TableName=lakefs-cloud-refstore-companionprofessional Value:0xc01df5ec38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197021342s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-companionprofessional} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-companionprofessional} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-cunning-insect-c6gj8j State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-cunning-insect-c6gj8j Value:0xc01df5ec88} C:{Var:C Labels:TableName=lakefs-cloud-refstore-cunning-insect-c6gj8j Value:0xc01df5ec80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197026655s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-cunning-insect-c6gj8j} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-cunning-insect-c6gj8j} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-custom1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-custom1 Value:0xc01df5ed58} C:{Var:C Labels:TableName=lakefs-cloud-refstore-custom1 Value:0xc01df5ed50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197029555s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-custom1} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-custom1} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-divine-jackass-69gfky State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-divine-jackass-69gfky Value:0xc01df5ee48} C:{Var:C Labels:TableName=lakefs-cloud-refstore-divine-jackass-69gfky Value:0xc01df5ee40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197032559s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-divine-jackass-69gfky} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-divine-jackass-69gfky} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-driving-albacore-f6p007 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-driving-albacore-f6p007 Value:0xc01df5ee90} C:{Var:C Labels:TableName=lakefs-cloud-refstore-driving-albacore-f6p007 Value:0xc01df5ee98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197035629s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-driving-albacore-f6p007} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-driving-albacore-f6p007} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-enigma State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-enigma Value:0xc01df5ef08} C:{Var:C Labels:TableName=lakefs-cloud-refstore-enigma Value:0xc01df5ef00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197040782s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-enigma} value=0.8632077004810383 ], [ var='C' labels={TableName=lakefs-cloud-refstore-enigma} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-epcor State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-epcor Value:0xc01df5efb0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-epcor Value:0xc01df5efb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197044925s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-epcor} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-epcor} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-eternal-turkey-h7y8d4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-eternal-turkey-h7y8d4 Value:0xc01df5f000} C:{Var:C Labels:TableName=lakefs-cloud-refstore-eternal-turkey-h7y8d4 Value:0xc01df5f008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197048519s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-eternal-turkey-h7y8d4} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-eternal-turkey-h7y8d4} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-fine-scorpion-z46p3a State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-fine-scorpion-z46p3a Value:0xc01df5f040} C:{Var:C Labels:TableName=lakefs-cloud-refstore-fine-scorpion-z46p3a Value:0xc01df5f048}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197052912s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-fine-scorpion-z46p3a} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-fine-scorpion-z46p3a} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-healthy-mutt-8wx29p State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-healthy-mutt-8wx29p Value:0xc01df5f0f8} C:{Var:C Labels:TableName=lakefs-cloud-refstore-healthy-mutt-8wx29p Value:0xc01df5f0f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197058057s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-healthy-mutt-8wx29p} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-healthy-mutt-8wx29p} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-helpful-boxer-x10u0j State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-helpful-boxer-x10u0j Value:0xc01df5f140} C:{Var:C Labels:TableName=lakefs-cloud-refstore-helpful-boxer-x10u0j Value:0xc01df5f148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197061383s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-helpful-boxer-x10u0j} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-helpful-boxer-x10u0j} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-immense-husky-hz3ybb State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-immense-husky-hz3ybb Value:0xc01df5f248} C:{Var:C Labels:TableName=lakefs-cloud-refstore-immense-husky-hz3ybb Value:0xc01df5f240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197064335s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-immense-husky-hz3ybb} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-immense-husky-hz3ybb} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-innocent-sunbeam-uy8k35 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-innocent-sunbeam-uy8k35 Value:0xc01df5f310} C:{Var:C Labels:TableName=lakefs-cloud-refstore-innocent-sunbeam-uy8k35 Value:0xc01df5f318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197066851s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-innocent-sunbeam-uy8k35} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-innocent-sunbeam-uy8k35} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-living-gelding-664vr0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-living-gelding-664vr0 Value:0xc01df5f360} C:{Var:C Labels:TableName=lakefs-cloud-refstore-living-gelding-664vr0 Value:0xc01df5f368}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197071912s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-living-gelding-664vr0} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-living-gelding-664vr0} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-mighty-iguana-iz3nkf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-mighty-iguana-iz3nkf Value:0xc01df5f3a0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-mighty-iguana-iz3nkf Value:0xc01df5f3a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197075797s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-mighty-iguana-iz3nkf} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-mighty-iguana-iz3nkf} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb Value:0xc01df5f3e0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb Value:0xc01df5f3e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197079149s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-precious-akita-yb6fqc State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-precious-akita-yb6fqc Value:0xc01df5f488} C:{Var:C Labels:TableName=lakefs-cloud-refstore-precious-akita-yb6fqc Value:0xc01df5f480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197081949s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-precious-akita-yb6fqc} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-precious-akita-yb6fqc} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-ruling-poodle-iwf8zg State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-ruling-poodle-iwf8zg Value:0xc01df5f4f8} C:{Var:C Labels:TableName=lakefs-cloud-refstore-ruling-poodle-iwf8zg Value:0xc01df5f4f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197085508s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-ruling-poodle-iwf8zg} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-ruling-poodle-iwf8zg} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-similarweb State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-similarweb Value:0xc01df5f540} C:{Var:C Labels:TableName=lakefs-cloud-refstore-similarweb Value:0xc01df5f548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197089242s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-similarweb} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-similarweb} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-star-shrew-x7jynq State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-star-shrew-x7jynq Value:0xc01df5f588} C:{Var:C Labels:TableName=lakefs-cloud-refstore-star-shrew-x7jynq Value:0xc01df5f580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197092532s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-star-shrew-x7jynq} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-star-shrew-x7jynq} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse Value:0xc01df5f5d0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse Value:0xc01df5f5d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197095577s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-advanced-heron-87ad3m State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-advanced-heron-87ad3m Value:0xc01df5f630} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-advanced-heron-87ad3m Value:0xc01df5f638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197099021s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-advanced-heron-87ad3m} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-advanced-heron-87ad3m} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-artistic-pug-5f50bt State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-artistic-pug-5f50bt Value:0xc01df5f750} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-artistic-pug-5f50bt Value:0xc01df5f758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197101986s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-artistic-pug-5f50bt} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-artistic-pug-5f50bt} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-azure State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-azure Value:0xc01df5f830} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-azure Value:0xc01df5f838}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197105602s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-azure} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-azure} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-current-ostrich-sv74vd State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-current-ostrich-sv74vd Value:0xc01df5f888} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-current-ostrich-sv74vd Value:0xc01df5f880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197110125s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-current-ostrich-sv74vd} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-current-ostrich-sv74vd} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-definite-gnat-7k4t60 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-definite-gnat-7k4t60 Value:0xc01df5f8f8} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-definite-gnat-7k4t60 Value:0xc01df5f8f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197113011s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-definite-gnat-7k4t60} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-definite-gnat-7k4t60} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-devoted-mackerel-av9yz5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-devoted-mackerel-av9yz5 Value:0xc01df5f990} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-devoted-mackerel-av9yz5 Value:0xc01df5f998}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197115991s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-devoted-mackerel-av9yz5} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-devoted-mackerel-av9yz5} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-enhanced-bedbug-v1vdsc State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-enhanced-bedbug-v1vdsc Value:0xc01df5f9e0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-enhanced-bedbug-v1vdsc Value:0xc01df5f9e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197119225s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-enhanced-bedbug-v1vdsc} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-enhanced-bedbug-v1vdsc} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-epic-monster-gdiaz1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-epic-monster-gdiaz1 Value:0xc01df5fba0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-epic-monster-gdiaz1 Value:0xc01df5fba8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197122168s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-epic-monster-gdiaz1} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-epic-monster-gdiaz1} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-evident-wh State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-evident-wh Value:0xc01df5fc70} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-evident-wh Value:0xc01df5fc78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197125022s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-evident-wh} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-evident-wh} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-fast-chicken-6u1a39 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-fast-chicken-6u1a39 Value:0xc01df5fd30} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-fast-chicken-6u1a39 Value:0xc01df5fd38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197128871s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-fast-chicken-6u1a39} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-fast-chicken-6u1a39} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-handy-jawfish-v44gkd State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-handy-jawfish-v44gkd Value:0xc01df5fd80} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-handy-jawfish-v44gkd Value:0xc01df5fd88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197131218s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-handy-jawfish-v44gkd} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-handy-jawfish-v44gkd} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut Value:0xc01df5fdc0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut Value:0xc01df5fdc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197134312s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj Value:0xc01df5ff00} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj Value:0xc01df5ff08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197140564s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-inland-mar State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-inland-mar Value:0xc01df5ff40} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-inland-mar Value:0xc01df5ff48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197144425s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-inland-mar} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-inland-mar} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-intimate-chamois-tx9zvw State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-intimate-chamois-tx9zvw Value:0xc01df5ff80} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-intimate-chamois-tx9zvw Value:0xc01df5ff88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197147174s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-intimate-chamois-tx9zvw} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-intimate-chamois-tx9zvw} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx Value:0xc008116018} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx Value:0xc008116010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197150042s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-leading-le State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-leading-le Value:0xc008116068} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-leading-le Value:0xc008116060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197152606s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-leading-le} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-leading-le} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-loyal-wallaby-qv0ffu State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-loyal-wallaby-qv0ffu Value:0xc008116100} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-loyal-wallaby-qv0ffu Value:0xc008116108}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197155713s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-loyal-wallaby-qv0ffu} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-loyal-wallaby-qv0ffu} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-maximum-mako-jr1uj2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-maximum-mako-jr1uj2 Value:0xc008116150} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-maximum-mako-jr1uj2 Value:0xc008116158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197158926s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-maximum-mako-jr1uj2} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-maximum-mako-jr1uj2} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-needed-snapper-kpneda State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-needed-snapper-kpneda Value:0xc0081161f0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-needed-snapper-kpneda Value:0xc0081161f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197161586s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-needed-snapper-kpneda} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-needed-snapper-kpneda} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-optimum-worm-07md7q State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-optimum-worm-07md7q Value:0xc0081162a8} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-optimum-worm-07md7q Value:0xc0081162a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197163979s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-optimum-worm-07md7q} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-optimum-worm-07md7q} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-pl-gc State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-pl-gc Value:0xc0081162e0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-pl-gc Value:0xc0081162e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197168082s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-pl-gc} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-pl-gc} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7 Value:0xc0081163a0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7 Value:0xc0081163a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197170456s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-powerful-goose-gk1f8c State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-powerful-goose-gk1f8c Value:0xc008116410} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-powerful-goose-gk1f8c Value:0xc008116418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197173092s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-powerful-goose-gk1f8c} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-powerful-goose-gk1f8c} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-quality-bedbug-1usiqf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-quality-bedbug-1usiqf Value:0xc0081164c0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-quality-bedbug-1usiqf Value:0xc0081164c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197175846s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-quality-bedbug-1usiqf} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-quality-bedbug-1usiqf} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-regular-chipmunk-hj8ukf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-regular-chipmunk-hj8ukf Value:0xc008116500} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-regular-chipmunk-hj8ukf Value:0xc008116508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197192255s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-regular-chipmunk-hj8ukf} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-regular-chipmunk-hj8ukf} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-renewing-kingfish-d22q0h State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-renewing-kingfish-d22q0h Value:0xc0081165b0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-renewing-kingfish-d22q0h Value:0xc0081165b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197197592s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-renewing-kingfish-d22q0h} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-renewing-kingfish-d22q0h} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-rich-pony-1grudx State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-rich-pony-1grudx Value:0xc008116680} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-rich-pony-1grudx Value:0xc008116688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197202696s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-rich-pony-1grudx} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-rich-pony-1grudx} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-right-pony-2n5nzm State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-right-pony-2n5nzm Value:0xc008116720} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-right-pony-2n5nzm Value:0xc008116728}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197207904s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-right-pony-2n5nzm} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-right-pony-2n5nzm} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-rigid-star State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-rigid-star Value:0xc008116760} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-rigid-star Value:0xc008116768}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197213594s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-rigid-star} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-rigid-star} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-sincere-asp-tyypgm State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-sincere-asp-tyypgm Value:0xc008116810} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-sincere-asp-tyypgm Value:0xc008116818}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197216956s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-sincere-asp-tyypgm} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-sincere-asp-tyypgm} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-sincere-hawk-fpadbz State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-sincere-hawk-fpadbz Value:0xc008116858} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-sincere-hawk-fpadbz Value:0xc008116850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197222662s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-sincere-hawk-fpadbz} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-sincere-hawk-fpadbz} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-sts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-sts Value:0xc008116900} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-sts Value:0xc008116908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197226026s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-sts} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-sts} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-summary-tiger-8y4t5r State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-summary-tiger-8y4t5r Value:0xc008116958} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-summary-tiger-8y4t5r Value:0xc008116950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19722926s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-summary-tiger-8y4t5r} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-summary-tiger-8y4t5r} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-testy-cow State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-testy-cow Value:0xc0081169f0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-testy-cow Value:0xc0081169f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197232201s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-testy-cow} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-testy-cow} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-tops-seasnail-ceddcs State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-tops-seasnail-ceddcs Value:0xc008116a38} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-tops-seasnail-ceddcs Value:0xc008116a30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197235492s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-tops-seasnail-ceddcs} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-tops-seasnail-ceddcs} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-vague-stoa State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-vague-stoa Value:0xc008116a70} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-vague-stoa Value:0xc008116a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197238243s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-vague-stoa} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-vague-stoa} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-treeverse-wealthy-pug-qur9bw State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-treeverse-wealthy-pug-qur9bw Value:0xc008116b28} C:{Var:C Labels:TableName=lakefs-cloud-refstore-treeverse-wealthy-pug-qur9bw Value:0xc008116b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197241962s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-treeverse-wealthy-pug-qur9bw} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-treeverse-wealthy-pug-qur9bw} value=0 ]} {Instance:TableName=lakefs-cloud-refstore-woven-geo-dev State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-cloud-refstore-woven-geo-dev Value:0xc008116bd0} C:{Var:C Labels:TableName=lakefs-cloud-refstore-woven-geo-dev Value:0xc008116bd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197246448s EvaluationString:[ var='B' labels={TableName=lakefs-cloud-refstore-woven-geo-dev} value=0 ], [ var='C' labels={TableName=lakefs-cloud-refstore-woven-geo-dev} value=0 ]} {Instance:TableName=lakefs-replication-table State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=lakefs-replication-table Value:0xc008116c20} C:{Var:C Labels:TableName=lakefs-replication-table Value:0xc008116c28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197251088s EvaluationString:[ var='B' labels={TableName=lakefs-replication-table} value=0 ], [ var='C' labels={TableName=lakefs-replication-table} value=0 ]} {Instance:TableName=terraform-locks State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:TableName=terraform-locks Value:0xc008116d50} C:{Var:C Labels:TableName=terraform-locks Value:0xc008116d58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.197255484s EvaluationString:[ var='B' labels={TableName=terraform-locks} value=0 ], [ var='C' labels={TableName=terraform-locks} value=0 ]}]" duration=262.678355ms
+level=debug ts=2024-05-29T13:44:15.199976703Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.200011438Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=qa-west2"
+level=debug ts=2024-05-29T13:44:15.199834371Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentCF375" t=2024-05-29T13:44:15.19989059Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.199771195Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=qa-west2"
+logger=ngalert.state.manager.persist user=59625 slug=alquilerargentina t=2024-05-29T13:44:15.199777996Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=806229 slug=simplisafe instance="__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-8-166.ec2.internal, instance=ip-10-90-8-166.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER" t=2024-05-29T13:44:15.199749985Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.199727226Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=59625 slug=alquilerargentina instance= t=2024-05-29T13:44:15.199750053Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=59625 slug=alquilerargentina t=2024-05-29T13:44:15.199726023Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.199702954Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=qa-east"
+logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.199593623Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=qa-east"
+logger=ngalert.state.manager user=806229 slug=simplisafe instance="__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-12-77.ec2.internal, instance=ip-10-90-12-77.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER" t=2024-05-29T13:44:15.199573742Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:15.199425849Z level=debug msg="State manager processing evaluation results" resultCount=6
+logger=ngalert.scheduler user=806229 slug=simplisafe version=33 fingerprint=24feaa876a01ff9e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.199254077Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-12-77.ec2.internal, instance=ip-10-90-12-77.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-12-77.ec2.internal, instance=ip-10-90-12-77.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER Value:0xc06017a008} C:{Var:C Labels:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-12-77.ec2.internal, instance=ip-10-90-12-77.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER Value:0xc06017a070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.198258052s EvaluationString:[ var='A' labels={__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-12-77.ec2.internal, instance=ip-10-90-12-77.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER} value=0 ], [ var='C' labels={__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-12-77.ec2.internal, instance=ip-10-90-12-77.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER} value=0 ]} {Instance:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-6-248, instance=ip-10-90-6-248, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-6-248, instance=ip-10-90-6-248, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER Value:0xc06017a228} C:{Var:C Labels:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-6-248, instance=ip-10-90-6-248, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER Value:0xc06017a2c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.198281682s EvaluationString:[ var='A' labels={__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-6-248, instance=ip-10-90-6-248, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER} value=0 ], [ var='C' labels={__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-6-248, instance=ip-10-90-6-248, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER} value=0 ]} {Instance:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-8-166.ec2.internal, instance=ip-10-90-8-166.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-8-166.ec2.internal, instance=ip-10-90-8-166.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER Value:0xc06017a420} C:{Var:C Labels:__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-8-166.ec2.internal, instance=ip-10-90-8-166.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER Value:0xc06017a3b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.198297422s EvaluationString:[ var='A' labels={__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-8-166.ec2.internal, instance=ip-10-90-8-166.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER} value=0 ], [ var='C' labels={__name__=consul_healthcheck_status, check=leader, env=qa-east, host=ip-10-90-8-166.ec2.internal, instance=ip-10-90-8-166.ec2.internal, otel=1, region=us-east-1, service=consul, type=qa-east-consul-server, user=$USER} value=0 ]} {Instance:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-11-251.us-west-2.compute.internal, instance=ip-10-91-11-251.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-11-251.us-west-2.compute.internal, instance=ip-10-91-11-251.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER Value:0xc06017a510} C:{Var:C Labels:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-11-251.us-west-2.compute.internal, instance=ip-10-91-11-251.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER Value:0xc06017a580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.198311732s EvaluationString:[ var='A' labels={__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-11-251.us-west-2.compute.internal, instance=ip-10-91-11-251.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER} value=0 ], [ var='C' labels={__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-11-251.us-west-2.compute.internal, instance=ip-10-91-11-251.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER} value=0 ]} {Instance:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-13-8.us-west-2.compute.internal, instance=ip-10-91-13-8.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-13-8.us-west-2.compute.internal, instance=ip-10-91-13-8.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER Value:0xc06017a700} C:{Var:C Labels:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-13-8.us-west-2.compute.internal, instance=ip-10-91-13-8.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER Value:0xc06017a778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.198323162s EvaluationString:[ var='A' labels={__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-13-8.us-west-2.compute.internal, instance=ip-10-91-13-8.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER} value=0 ], [ var='C' labels={__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-13-8.us-west-2.compute.internal, instance=ip-10-91-13-8.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER} value=0 ]} {Instance:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-4-13.us-west-2.compute.internal, instance=ip-10-91-4-13.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-4-13.us-west-2.compute.internal, instance=ip-10-91-4-13.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER Value:0xc06017a878} C:{Var:C Labels:__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-4-13.us-west-2.compute.internal, instance=ip-10-91-4-13.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER Value:0xc06017a8f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.198333242s EvaluationString:[ var='A' labels={__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-4-13.us-west-2.compute.internal, instance=ip-10-91-4-13.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER} value=0 ], [ var='C' labels={__name__=consul_healthcheck_status, check=leader, env=qa-west2, host=ip-10-91-4-13.us-west-2.compute.internal, instance=ip-10-91-4-13.us-west-2.compute.internal, otel=1, region=us-west-2, service=consul, type=qa-west2-consul-server, user=$USER} value=0 ]}]" duration=18.19195ms
+level=debug ts=2024-05-29T13:44:15.199268938Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentC27C1" t=2024-05-29T13:44:15.199259278Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=tmpfs, fstype=tmpfs, instance=sueuwe1dpladwcadb21001.plad.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/run" t=2024-05-29T13:44:15.199216357Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=tmpfs, fstype=tmpfs, instance=sueuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/run/user/2003" t=2024-05-29T13:44:15.199174556Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=tmpfs, fstype=tmpfs, instance=sueuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/run" t=2024-05-29T13:44:15.199154101Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=tmpfs, fstype=tmpfs, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/run/user/2101" t=2024-05-29T13:44:15.199120268Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=tmpfs, fstype=tmpfs, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/run/user/2101" t=2024-05-29T13:44:15.19910533Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=tmpfs, fstype=tmpfs, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/run" t=2024-05-29T13:44:15.199069921Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentC25C2" t=2024-05-29T13:44:15.199045278Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=893158 slug=cmfollnp t=2024-05-29T13:44:15.198749201Z level=debug msg="Deleting alert states" count=1
+level=debug ts=2024-05-29T13:44:15.198831529Z caller=remote_instance_store.go:51 user=893158 slug=cmfollnp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.198787085Z caller=remote_instance_store.go:51 user=505309 slug=jromero248 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:15.198704837Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=505309 slug=jromero248 instance="datasource_uid=gS1UKYt4z, ref_id=A" t=2024-05-29T13:44:15.198702813Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=505309 slug=jromero248 instance="datasource_uid=gS1UKYt4z, ref_id=A" t=2024-05-29T13:44:15.198648912Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:15.198605087Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.198642957Z caller=remote_instance_store.go:51 user=843304 slug=ppcgroup msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=505309 slug=jromero248 instance="datasource_uid=gS1UKYt4z, ref_id=A" t=2024-05-29T13:44:15.198574577Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.scheduler user=505309 slug=jromero248 version=4 fingerprint=a30d688b2c86d4a2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.198461517Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=gS1UKYt4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.198088913s EvaluationString:}]" duration=103.609772ms
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda2, fstype=xfs, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/" t=2024-05-29T13:44:15.198514092Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=843304 slug=ppcgroup version=15 fingerprint=798b2b7e04aa13b3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.198467054Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190861447s EvaluationString:}]" duration=21.352827ms
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=suuscn1bfollnalapp1002.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1002-Host-VM, mountpoint=/run/user/7900" t=2024-05-29T13:44:15.198501909Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=suuscn1bfollcmlapp1002.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1002-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.198425328Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=suuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.198375749Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=suuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Pre-prod-follsnaapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.198352809Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda2, fstype=xfs, instance=dueuwe1bpladutlbst1001, job=Bastion-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.198348846Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda2, fstype=xfs, instance=dueuwe1bpladutlbst1001, job=Bastion-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.198335018Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=suuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1003-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.198318715Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda1, fstype=vfat, instance=sueuwe1dpladwcadb21001.plad.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/boot/efi" t=2024-05-29T13:44:15.198279878Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda1, fstype=vfat, instance=sueuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/boot/efi" t=2024-05-29T13:44:15.198029092Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.198107131Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentB39B0" t=2024-05-29T13:44:15.198246787Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.198069727Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda1, fstype=vfat, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/boot/efi" t=2024-05-29T13:44:15.19792219Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollwcsdbs1002.foll.gcp.hclsw.internal, job=QA-follwcsdbs1002-DB-Host-VM, mountpoint=/run/user/5945" t=2024-05-29T13:44:15.197954799Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollwcsdbs1002.foll.gcp.hclsw.internal, job=QA-follwcsdbs1002-DB-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.197923865Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.197963983Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=527204 slug=lnrsusinsurancenonprod t=2024-05-29T13:44:15.197880055Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.829029ms
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda1, fstype=vfat, instance=dueuwe1bpladwcldb21002.plad.gcp.hclsw.internal, job=Dev-DB-Host, mountpoint=/boot/efi" t=2024-05-29T13:44:15.197888152Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.197960833Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda1, fstype=vfat, instance=dueuwe1bpladwcldb21002.plad.gcp.hclsw.internal, job=Dev-DB-Host, mountpoint=/boot/efi" t=2024-05-29T13:44:15.197878877Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/run/user/7900" t=2024-05-29T13:44:15.197870827Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda1, fstype=vfat, instance=dueuwe1bpladutlbst1001, job=Bastion-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.197852705Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/sda1, fstype=vfat, instance=dueuwe1bpladutlbst1001, job=Bastion-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.197843212Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.19801141Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg02-lv02, fstype=ext4, instance=sueuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/home" t=2024-05-29T13:44:15.197774687Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentB2D43" t=2024-05-29T13:44:15.197993219Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:15.197915531Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.096478ms
+level=debug ts=2024-05-29T13:44:15.19785919Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.197818591Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.197749294Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg02-lv02, fstype=ext4, instance=sueuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/export" t=2024-05-29T13:44:15.197733942Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.197739775Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=76.221358ms
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.19771256Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=802128 slug=unstatic t=2024-05-29T13:44:15.19767309Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=27.063772ms
+logger=ngalert.state.manager.persist user=363350 slug=elements6007 t=2024-05-29T13:44:15.197712416Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg02-lv02, fstype=ext4, instance=sueuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/db2inst" t=2024-05-29T13:44:15.197695244Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg02-lv02, fstype=ext4, instance=sueuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/data" t=2024-05-29T13:44:15.197651224Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.197546353Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentADFEF" t=2024-05-29T13:44:15.197544713Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=sueuwe1dpladwcadb21001.plad.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/home" t=2024-05-29T13:44:15.197521009Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:15.197503112Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=25.665482ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=ServersAustralia, environment=production, instance=181.214.199.66:9998, ip=181.214.199.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=melbourne413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.197484666Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=sueuwe1dpladwcadb21001.plad.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/export" t=2024-05-29T13:44:15.197435868Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/run/user/4264" t=2024-05-29T13:44:15.197439754Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.197233495Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/run/user/4264" t=2024-05-29T13:44:15.197430028Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=8bd877c20025d6e6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.197302623Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.196941856s EvaluationString:}]" duration=15.514467ms
+logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:15.197242613Z level=debug msg="Saving alert states" count=59 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.19718949Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=79.664061ms
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=sueuwe1dpladwcadb21001.plad.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/db2inst" t=2024-05-29T13:44:15.19720455Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=ServersAustralia, environment=production, instance=181.214.199.66:9998, ip=181.214.199.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/aus-melbourne.crt, role=vpn, server=melbourne413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.197146162Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=491157 slug=prd01wr instance="container=transfer-compliance" t=2024-05-29T13:44:15.197183249Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=sueuwe1dpladwcadb21001.plad.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/db2inst" t=2024-05-29T13:44:15.197047664Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/7912" t=2024-05-29T13:44:15.197100442Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/7912" t=2024-05-29T13:44:15.197084884Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/7905" t=2024-05-29T13:44:15.197044436Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentA2BF2" t=2024-05-29T13:44:15.197037955Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/7900" t=2024-05-29T13:44:15.196968644Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/opt" t=2024-05-29T13:44:15.196968027Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=491157 slug=prd01wr instance="container=pricing-graphql" t=2024-05-29T13:44:15.197004551Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=ServersAustralia, environment=production, instance=181.214.199.130:9998, ip=181.214.199.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=melbourne412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.196987584Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/home" t=2024-05-29T13:44:15.196885555Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=841535 slug=elvaco01 t=2024-05-29T13:44:15.196887579Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.226202ms
+logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.196878301Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=proxy-0, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.196885344Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=proxy-0, serverless_id=int-eu-west-1"
t=2024-05-29T13:44:15.196877624Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.196816666Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/export" t=2024-05-29T13:44:15.196850728Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent9CC5F" t=2024-05-29T13:44:15.196764568Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/4208" t=2024-05-29T13:44:15.196837999Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=ServersAustralia, environment=production, instance=181.214.199.130:9998, ip=181.214.199.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/aus-melbourne.crt, role=vpn, server=melbourne412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.196777561Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/4208" t=2024-05-29T13:44:15.196827366Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/db2inst" t=2024-05-29T13:44:15.196805933Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=prometheus-prometheus-node-exporter-tknqg, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.196798475Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/db2inst" t=2024-05-29T13:44:15.196796993Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=prometheus-prometheus-node-exporter-m2kvn, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.196734717Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=queuwe1bpladwcldb21001.plad.gcp.hclsw.internal, job=QA-DB-Host, mountpoint=/data" t=2024-05-29T13:44:15.196754479Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/4172" t=2024-05-29T13:44:15.196602287Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager 
user=183214 slug=vectorizedio instance="pod=prometheus-prometheus-node-exporter-m2kvn, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.19672258Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=cbfad1269f4c3e0f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.196637269Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19641376s EvaluationString:}]" duration=184.854018ms +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=prometheus-prometheus-node-exporter-f7hqj, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.196632694Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=dueuwe1bpladwcldb21002.plad.gcp.hclsw.internal, job=Dev-DB-Host, mountpoint=/home" t=2024-05-29T13:44:15.19663353Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=dueuwe1bpladwcldb21002.plad.gcp.hclsw.internal, job=Dev-DB-Host, mountpoint=/export" t=2024-05-29T13:44:15.196589666Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/run/user/4172" t=2024-05-29T13:44:15.19641462Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.8:9998, ip=173.239.203.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=melbourne434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.196575861Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.196507126Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.196501041Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.8:9998, ip=173.239.203.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/au-australia-so.crt, role=streaming-optimized, server=melbourne434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.196287767Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.196254464Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.7:9998, ip=173.239.203.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=melbourne433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.196068075Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.196471597Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=dueuwe1bpladwcldb21002.plad.gcp.hclsw.internal, job=Dev-DB-Host, mountpoint=/data" t=2024-05-29T13:44:15.196500512Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=633221 slug=chengtao instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.196353192Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.196448031Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.196088427Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=633221 slug=chengtao instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.196329101Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=dueuwe1bpladutlbst1001, job=Bastion-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.196414855Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=633221 slug=chengtao instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.19629552Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=prometheus-prometheus-kube-prometheus-prometheus-0, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.196392843Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=prometheus-prometheus-kube-prometheus-prometheus-0, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.196381389Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=dueuwe1bpladutlbst1001, job=Bastion-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.196334627Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:15.196314802Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/run/user/7900" t=2024-05-29T13:44:15.196348136Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager 
user=491157 slug=prd01wr instance="container=graphql-fx-rate-alerts" t=2024-05-29T13:44:15.196277353Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=633221 slug=chengtao instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.196238079Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=633221 slug=chengtao instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.19620394Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent80D7B" t=2024-05-29T13:44:15.196149634Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=868411 slug=cmpladnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=dueuwe1bpladutlbst1001, job=Bastion-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.196261682Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/run/user/4193" t=2024-05-29T13:44:15.196162996Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=prometheus-kube-prometheus-operator-6dfdbb5f7d-ft498, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.19622954Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=274199 slug=telemetriahgm instance= t=2024-05-29T13:44:15.196168371Z level=debug msg="Setting next state" handler=resultError +level=debug ts=2024-05-29T13:44:15.194311959Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=615392 slug=shinemetrics t=2024-05-29T13:44:15.193743951Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.939032ms +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/run/user/7900" t=2024-05-29T13:44:15.19595928Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/run/user/4193" t=2024-05-29T13:44:15.195916962Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.195889173Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.7:9998, ip=173.239.203.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/aus-melbourne.crt, role=vpn, server=melbourne433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.195873591Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, 
city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.7:9998, ip=173.239.203.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/aus-melbourne.crt, role=vpn, server=melbourne433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.195857834Z level=debug msg="Setting next state" handler=resultNormal +level=info ts=2024-05-29T13:44:15.195608311Z caller=remote_alert_sender.go:94 user=767797 slug=mgmresorts host=mgmresorts-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.71.101:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=admlwegfczrwga alerts=1 +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=logging-fluent-bit-6w9sj, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.195765488Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.195732775Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.195714724Z caller=remote_instance_store.go:51 user=434892 slug=apexfsnzdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=491157 slug=prd01wr t=2024-05-29T13:44:15.195660926Z level=debug msg="State manager processing evaluation results" resultCount=8 +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=logging-fluent-bit-6c4j9, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.195706364Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.19563911Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.195672361Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=434892 slug=apexfsnzdev t=2024-05-29T13:44:15.195658582Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1 +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.195705918Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.195692499Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.6:9998, ip=173.239.203.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=melbourne432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.195663228Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.195626017Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:15.19548579Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.333105ms +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollutlbst1001.foll.gcp.hclsw.internal, job=Non-prod-bastion-Host-VM, mountpoint=/run/user/2022" t=2024-05-29T13:44:15.195613168Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent6A6BF" t=2024-05-29T13:44:15.195595105Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.195509686Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollutlbst1001.foll.gcp.hclsw.internal, job=Non-prod-bastion-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.195587012Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.195532474Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.195504541Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollutlbld1001.foll.gcp.hclsw.internal, job=Non-prod-follutlbld1001-Host-VM, mountpoint=/run/user/4172" t=2024-05-29T13:44:15.195542012Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=kube-proxy-sptn2, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.195547279Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=kube-proxy-sptn2, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.195535959Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.6:9998, ip=173.239.203.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/aus-melbourne.crt, role=vpn, server=melbourne432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.195453878Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollutlbld1001.foll.gcp.hclsw.internal, job=Non-prod-follutlbld1001-Host-VM, mountpoint=/run/user/10002" t=2024-05-29T13:44:15.19549388Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=kube-proxy-nb5sm, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.195461247Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollutlbld1001.foll.gcp.hclsw.internal, job=Non-prod-follutlbld1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.195437457Z level=debug msg="Setting next 
state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollnetutl1001.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.195403514Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afollnetutl1001.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.195389006Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=kube-proxy-6ll6t, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.195348396Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=nuuscn1afolllvmora1001.foll.gcp.hclsw.internal, job=Non-prod-folllvmora1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.19531361Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=880160 slug=unbdchprod t=2024-05-29T13:44:15.195212963Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=ingress-nginx-controller-76477854cc-sfzqx, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.195245046Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=880160 slug=unbdchprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.195196808Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=880160 slug=unbdchprod t=2024-05-29T13:44:15.195138519Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.195006797Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.195047796Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.195029447Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.194851693Z caller=remote_instance_store.go:51 user=264941 slug=agnosticeng msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=434892 slug=apexfsnzdev instance="instance=10.0.1.189:9102" t=2024-05-29T13:44:15.195003688Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.195006183Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollspaweb3002.foll.gcp.hclsw.internal, job=Dev-follspaweb3002-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194983781Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollspaweb3001.foll.gcp.hclsw.internal, job=Dev-follspaweb3001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194954512Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollspaweb3001.foll.gcp.hclsw.internal, job=Dev-follspaweb3001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194941228Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollspaweb2002.foll.gcp.hclsw.internal, job=Dev-follspaweb2002-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194897011Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.4:9998, ip=173.239.203.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=melbourne430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.194863679Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollspaweb2001.foll.gcp.hclsw.internal, job=Dev-follspaweb2001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194870985Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollspaweb2001.foll.gcp.hclsw.internal, job=Dev-follspaweb2001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194856457Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=falco-6jn86, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.194782802Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=434892 slug=apexfsnzdev instance="instance=10.0.1.142:9102" t=2024-05-29T13:44:15.19476539Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=Dev-follspaweb1002-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194817846Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=434892 slug=apexfsnzdev instance="instance=10.0.1.142:9102" t=2024-05-29T13:44:15.194752638Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.4:9998, ip=173.239.203.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/aus-melbourne.crt, role=vpn, server=melbourne430, 
server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.194681895Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.194650314Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent29F43" t=2024-05-29T13:44:15.194612658Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.194632521Z caller=remote_instance_store.go:51 user=830631 slug=api3 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.1945693Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=434892 slug=apexfsnzdev instance="instance=10.0.0.225:9102" t=2024-05-29T13:44:15.194589726Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.194578949Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.194574948Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194565649Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.194503377Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:15.1945022Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.3:9998, ip=173.239.203.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=melbourne429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.194492942Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=external-dns-7cb4dc559b-6tmjd, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.194495884Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=830631 slug=api3 version=88 fingerprint=a3aa3b640428c37f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.194424436Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.194030215s EvaluationString:}]" duration=34.172559ms +level=debug ts=2024-05-29T13:44:15.194412909Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +level=info 
ts=2024-05-29T13:44:15.194362848Z caller=grafana.go:247 user=884866 slug=cnonumerique msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=pending&state=error" groups=10 alerts=0 +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=csi-secrets-store-secrets-store-csi-driver-tp985, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.194437055Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194450759Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=csi-secrets-store-secrets-store-csi-driver-rt6wm, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.194386931Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=csi-secrets-store-secrets-store-csi-driver-rt6wm, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.194378068Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent29574" t=2024-05-29T13:44:15.194393803Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent29574" t=2024-05-29T13:44:15.194383131Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194373637Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Melbourne, country=Australia, datacenter=PIA, environment=production, instance=173.239.203.3:9998, ip=173.239.203.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/aus-melbourne.crt, role=vpn, server=melbourne429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.194263277Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=Dev-follnalapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.194236509Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=csi-secrets-store-secrets-store-csi-driver-2z56p, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.194181339Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=csi-secrets-store-secrets-store-csi-driver-2z56p, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.194165549Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.194136649Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance" +level=debug 
ts=2024-05-29T13:44:15.194093163Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.193991766Z caller=remote_instance_store.go:51 user=220750 slug=homeys msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollintapp3001.foll.gcp.hclsw.internal, job=Dev-follintapp3001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.19408227Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.194063392Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PLAY-RB_QUEUE_INTER_LEDGER_THROTTLER-SQS" t=2024-05-29T13:44:15.194040625Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manila, country=Philippines, datacenter=M247, environment=production, instance=188.214.125.178:9998, ip=188.214.125.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=philippines403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.194059598Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manila, country=Philippines, datacenter=M247, environment=production, instance=188.214.125.178:9998, ip=188.214.125.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=philippines403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.194044701Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:15.193960966Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent248A2" t=2024-05-29T13:44:15.194017734Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=a9e986a232e348a3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.193850875Z level=debug msg="Alert rule evaluated" results="[{Instance:QueueName=PROD-PLAY-RB_QUEUE_INTER_LEDGER_THROTTLER-SQS State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:QueueName=PROD-PLAY-RB_QUEUE_INTER_LEDGER_THROTTLER-SQS Value:0xc01dd4a2a0} C:{Var:C Labels:QueueName=PROD-PLAY-RB_QUEUE_INTER_LEDGER_THROTTLER-SQS Value:0xc01dd4a2a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19349322s EvaluationString:[ var='B' labels={QueueName=PROD-PLAY-RB_QUEUE_INTER_LEDGER_THROTTLER-SQS} value=0 ], [ var='C' labels={QueueName=PROD-PLAY-RB_QUEUE_INTER_LEDGER_THROTTLER-SQS} value=0 ]}]" duration=221.70783ms +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollintapp2001.foll.gcp.hclsw.internal, job=Dev-follintapp2001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.193996615Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="pod=console-748747555-bh8hd, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193989704Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=console-748747555-bh8hd, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193979099Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=console-748747555-64brw, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193920221Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Dev-follintapp1001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.193897151Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=cert-manager-webhook-85676d588-hs92l, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193848165Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=tmpfs, fstype=tmpfs, instance=duuscn1afollcmlapp4001.foll.gcp.hclsw.internal, job=Dev-follcmlapp4001-Host-VM, mountpoint=/run" t=2024-05-29T13:44:15.193869923Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manila, country=Philippines, datacenter=M247, environment=production, instance=188.214.125.178:9998, ip=188.214.125.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/philippines.crt, role=vpn, server=philippines403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.19381447Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/qa, fstype=nfs, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193814103Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.193754364Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" +level=info ts=2024-05-29T13:44:15.193755722Z caller=remote_alert_sender.go:94 user=174016 slug=journalstaging host=journalstaging-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.66.172:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=d9Yns3dnz alerts=1 +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=cert-manager-cainjector-7d564485c-qbffv, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193758624Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/qa, fstype=nfs, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193764478Z level=debug msg="Setting next state" handler=resultNormal +level=info ts=2024-05-29T13:44:15.193661228Z caller=remote_alert_sender.go:94 user=21051 slug=mojio host=mojio-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.80.247:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=db526c3c-8623-4590-930a-e85762d107bd alerts=1 +logger=ngalert.state.manager user=893158 slug=cmfollnp 
instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/qa, fstype=nfs, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193685597Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=21051 slug=mojio t=2024-05-29T13:44:15.193581331Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.754462ms +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/pre, fstype=nfs, instance=suuscn1bfollcmlapp1002.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1002-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.19365798Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/pre, fstype=nfs, instance=suuscn1bfollcmlapp1002.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1002-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193644158Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/pre, fstype=nfs, instance=suuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follmqbapp1001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.19361884Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent15826" t=2024-05-29T13:44:15.19355202Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=canary-5bf99b9865-x8hsf, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193537819Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.193517068Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.193493511Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-w5fng, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193478999Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/dev, fstype=nfs, instance=duuscn1afollmqbapp4001.foll.gcp.hclsw.internal, job=Dev-follmqbapp4001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193477405Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manila, country=Philippines, datacenter=M247, environment=production, instance=188.214.125.146:9998, ip=188.214.125.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=philippines402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.193460036Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/dev, fstype=nfs, 
instance=duuscn1afollintapp3001.foll.gcp.hclsw.internal, job=Dev-follintapp3001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193434772Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/dev, fstype=nfs, instance=duuscn1afollintapp2001.foll.gcp.hclsw.internal, job=Dev-follintapp2001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.19339344Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/dev, fstype=nfs, instance=duuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Dev-follintapp1001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193366272Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=aws-node-zxsjr, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.19332241Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=aws-node-qjltt, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193278028Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/dev, fstype=nfs, instance=duuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Dev-follintapp1001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.193353123Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/dev, fstype=nfs, instance=duuscn1afollcmlapp4001.foll.gcp.hclsw.internal, job=Dev-follcmlapp4001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.19332488Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.193201844Z caller=remote_instance_store.go:51 user=672418 slug=streamkap msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=fst-vol1.foll.gcp.hclsw.internal:/vol1/dev, fstype=nfs, instance=duuscn1afollcmlapp4001.foll.gcp.hclsw.internal, job=Dev-follcmlapp4001-Host-VM, mountpoint=/mnt/filestore" t=2024-05-29T13:44:15.19331166Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manila, country=Philippines, datacenter=M247, environment=production, instance=188.214.125.146:9998, ip=188.214.125.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/philippines.crt, role=vpn, server=philippines402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.193251186Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manila, country=Philippines, datacenter=M247, environment=production, instance=188.214.125.146:9998, ip=188.214.125.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/philippines.crt, role=vpn, server=philippines402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.193221537Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=aws-node-qcch5, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193203014Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:15.193143764Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cece3596-0e1a-49cf-be24-9c7801dbdca0 alerts=1
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=aws-node-qcch5, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193193272Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=suuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.193145071Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.193045448Z caller=remote_instance_store.go:51 user=326874 slug=fastpath msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=suuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Pre-prod-follsnaapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.193105659Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=starbreeze-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent04C11" t=2024-05-29T13:44:15.193077638Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=aws-load-balancer-controller-77c6c4674f-xrbvp, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.193012354Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=suuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follnalapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.192978489Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manila, country=Philippines, datacenter=M247, environment=production, instance=188.214.125.130:9998, ip=188.214.125.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=philippines401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.193025034Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.192991142Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=326874 slug=fastpath instance="alert_sensitivity=high, instance=https://connector-gp-cus.test.gofastpath.com/api/health, job=http-check-connector-gp-cus-test" t=2024-05-29T13:44:15.192965139Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1" t=2024-05-29T13:44:15.192954732Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=326874 slug=fastpath instance="alert_sensitivity=high, instance=https://connector-gp-cus.test.gofastpath.com/api/health, job=http-check-connector-gp-cus-test" t=2024-05-29T13:44:15.192950616Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.192891011Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=183214 slug=vectorizedio version=35 fingerprint=5ebc188f58300542 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.192078545Z level=debug msg="Alert rule evaluated" results="[{Instance:pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1 Value:0xc035c31380} RES:{Var:RES Labels:pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1 Value:0xc035c313e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19029404s EvaluationString:[ var='A' labels={pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=adjuster-84c4b5f46-n49q5, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-load-balancer-controller-77c6c4674f-xrbvp, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-load-balancer-controller-77c6c4674f-xrbvp, serverless_id=int-eu-west-1 Value:0xc035c31450} RES:{Var:RES Labels:pod=aws-load-balancer-controller-77c6c4674f-xrbvp, serverless_id=int-eu-west-1 Value:0xc035c314a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190306799s EvaluationString:[ var='A' labels={pod=aws-load-balancer-controller-77c6c4674f-xrbvp, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-load-balancer-controller-77c6c4674f-xrbvp, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-node-dbk49, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-node-dbk49, serverless_id=int-eu-west-1 Value:0xc035c315c0} RES:{Var:RES Labels:pod=aws-node-dbk49, serverless_id=int-eu-west-1 Value:0xc035c31620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190312984s EvaluationString:[ var='A' labels={pod=aws-node-dbk49, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-node-dbk49, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-node-n2l7q, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-node-n2l7q, serverless_id=int-eu-west-1 Value:0xc035c31710} RES:{Var:RES Labels:pod=aws-node-n2l7q, serverless_id=int-eu-west-1 Value:0xc035c31760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190318152s EvaluationString:[ var='A' labels={pod=aws-node-n2l7q, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-node-n2l7q, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-node-qcch5, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-node-qcch5, serverless_id=int-eu-west-1 Value:0xc035c31830} RES:{Var:RES Labels:pod=aws-node-qcch5, serverless_id=int-eu-west-1 Value:0xc035c318c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190322603s EvaluationString:[ var='A' labels={pod=aws-node-qcch5, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-node-qcch5, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-node-qjltt, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-node-qjltt, serverless_id=int-eu-west-1 Value:0xc035c319b0} RES:{Var:RES Labels:pod=aws-node-qjltt, serverless_id=int-eu-west-1 Value:0xc035c31a10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190333163s EvaluationString:[ var='A' labels={pod=aws-node-qjltt, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-node-qjltt, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-node-zxsjr, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-node-zxsjr, serverless_id=int-eu-west-1 Value:0xc035c31ad0} RES:{Var:RES Labels:pod=aws-node-zxsjr, serverless_id=int-eu-west-1 Value:0xc035c31b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190337783s EvaluationString:[ var='A' labels={pod=aws-node-zxsjr, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-node-zxsjr, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-m2bnk, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-m2bnk, serverless_id=int-eu-west-1 Value:0xc035c31bd0} RES:{Var:RES Labels:pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-m2bnk, serverless_id=int-eu-west-1 Value:0xc035c31c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190342445s EvaluationString:[ var='A' labels={pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-m2bnk, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-m2bnk, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-w5fng, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-w5fng, serverless_id=int-eu-west-1 Value:0xc035c31cc0} RES:{Var:RES Labels:pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-w5fng, serverless_id=int-eu-west-1 Value:0xc035c31d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190347733s EvaluationString:[ var='A' labels={pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-w5fng, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=aws-secrets-store-secrets-store-csi-driver-provider-aws-w5fng, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=canary-5bf99b9865-x8hsf, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=canary-5bf99b9865-x8hsf, serverless_id=int-eu-west-1 Value:0xc035c31dd0} RES:{Var:RES Labels:pod=canary-5bf99b9865-x8hsf, serverless_id=int-eu-west-1 Value:0xc035c31e30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190352973s EvaluationString:[ var='A' labels={pod=canary-5bf99b9865-x8hsf, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=canary-5bf99b9865-x8hsf, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=canary-long-0, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=canary-long-0, serverless_id=int-eu-west-1 Value:0xc035c31f20} RES:{Var:RES Labels:pod=canary-long-0, serverless_id=int-eu-west-1 Value:0xc035c31f90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.1903597s EvaluationString:[ var='A' labels={pod=canary-long-0, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=canary-long-0, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=cert-manager-7fccf897dc-cw478, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=cert-manager-7fccf897dc-cw478, serverless_id=int-eu-west-1 Value:0xc022c5a060} RES:{Var:RES Labels:pod=cert-manager-7fccf897dc-cw478, serverless_id=int-eu-west-1 Value:0xc022c5a030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190364464s EvaluationString:[ var='A' labels={pod=cert-manager-7fccf897dc-cw478, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=cert-manager-7fccf897dc-cw478, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=cert-manager-cainjector-7d564485c-qbffv, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=cert-manager-cainjector-7d564485c-qbffv, serverless_id=int-eu-west-1 Value:0xc022c5a100} RES:{Var:RES Labels:pod=cert-manager-cainjector-7d564485c-qbffv, serverless_id=int-eu-west-1 Value:0xc022c5a0c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190368642s EvaluationString:[ var='A' labels={pod=cert-manager-cainjector-7d564485c-qbffv, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=cert-manager-cainjector-7d564485c-qbffv, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=cert-manager-webhook-85676d588-hs92l, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=cert-manager-webhook-85676d588-hs92l, serverless_id=int-eu-west-1 Value:0xc022c5a240} RES:{Var:RES Labels:pod=cert-manager-webhook-85676d588-hs92l, serverless_id=int-eu-west-1 Value:0xc022c5a270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190372115s EvaluationString:[ var='A' labels={pod=cert-manager-webhook-85676d588-hs92l, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=cert-manager-webhook-85676d588-hs92l, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=console-748747555-64brw, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=console-748747555-64brw, serverless_id=int-eu-west-1 Value:0xc022c5a2d0} RES:{Var:RES Labels:pod=console-748747555-64brw, serverless_id=int-eu-west-1 Value:0xc022c5a310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190374904s EvaluationString:[ var='A' labels={pod=console-748747555-64brw, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=console-748747555-64brw, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=console-748747555-bh8hd, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=console-748747555-bh8hd, serverless_id=int-eu-west-1 Value:0xc022c5a370} RES:{Var:RES Labels:pod=console-748747555-bh8hd, serverless_id=int-eu-west-1 Value:0xc022c5a3a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190377554s EvaluationString:[ var='A' labels={pod=console-748747555-bh8hd, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=console-748747555-bh8hd, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=coredns-6f84b48f54-bzrj5, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=coredns-6f84b48f54-bzrj5, serverless_id=int-eu-west-1 Value:0xc022c5a530} RES:{Var:RES Labels:pod=coredns-6f84b48f54-bzrj5, serverless_id=int-eu-west-1 Value:0xc022c5a560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190380262s EvaluationString:[ var='A' labels={pod=coredns-6f84b48f54-bzrj5, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=coredns-6f84b48f54-bzrj5, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=coredns-6f84b48f54-xm6g5, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=coredns-6f84b48f54-xm6g5, serverless_id=int-eu-west-1 Value:0xc022c5a5f0} RES:{Var:RES Labels:pod=coredns-6f84b48f54-xm6g5, serverless_id=int-eu-west-1 Value:0xc022c5a620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190382603s EvaluationString:[ var='A' labels={pod=coredns-6f84b48f54-xm6g5, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=coredns-6f84b48f54-xm6g5, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=csi-secrets-store-secrets-store-csi-driver-2z56p, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=csi-secrets-store-secrets-store-csi-driver-2z56p, serverless_id=int-eu-west-1 Value:0xc022c5a6e0} RES:{Var:RES Labels:pod=csi-secrets-store-secrets-store-csi-driver-2z56p, serverless_id=int-eu-west-1 Value:0xc022c5a6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190385145s EvaluationString:[ var='A' labels={pod=csi-secrets-store-secrets-store-csi-driver-2z56p, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=csi-secrets-store-secrets-store-csi-driver-2z56p, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=csi-secrets-store-secrets-store-csi-driver-8x7w9, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=csi-secrets-store-secrets-store-csi-driver-8x7w9, serverless_id=int-eu-west-1 Value:0xc022c5a740} RES:{Var:RES Labels:pod=csi-secrets-store-secrets-store-csi-driver-8x7w9, serverless_id=int-eu-west-1 Value:0xc022c5a7a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190390661s EvaluationString:[ var='A' labels={pod=csi-secrets-store-secrets-store-csi-driver-8x7w9, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=csi-secrets-store-secrets-store-csi-driver-8x7w9, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=csi-secrets-store-secrets-store-csi-driver-ccgsq, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=csi-secrets-store-secrets-store-csi-driver-ccgsq, serverless_id=int-eu-west-1 Value:0xc022c5a950} RES:{Var:RES Labels:pod=csi-secrets-store-secrets-store-csi-driver-ccgsq, serverless_id=int-eu-west-1 Value:0xc022c5a980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190393019s EvaluationString:[ var='A' labels={pod=csi-secrets-store-secrets-store-csi-driver-ccgsq, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=csi-secrets-store-secrets-store-csi-driver-ccgsq, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=csi-secrets-store-secrets-store-csi-driver-rt6wm, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=csi-secrets-store-secrets-store-csi-driver-rt6wm, serverless_id=int-eu-west-1 Value:0xc022c5aa10} RES:{Var:RES Labels:pod=csi-secrets-store-secrets-store-csi-driver-rt6wm, serverless_id=int-eu-west-1 Value:0xc022c5aa40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190395525s EvaluationString:[ var='A' labels={pod=csi-secrets-store-secrets-store-csi-driver-rt6wm, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=csi-secrets-store-secrets-store-csi-driver-rt6wm, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=csi-secrets-store-secrets-store-csi-driver-tp985, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=csi-secrets-store-secrets-store-csi-driver-tp985, serverless_id=int-eu-west-1 Value:0xc022c5ab10} RES:{Var:RES Labels:pod=csi-secrets-store-secrets-store-csi-driver-tp985, serverless_id=int-eu-west-1 Value:0xc022c5ab40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190397636s EvaluationString:[ var='A' labels={pod=csi-secrets-store-secrets-store-csi-driver-tp985, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=csi-secrets-store-secrets-store-csi-driver-tp985, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=external-dns-7cb4dc559b-6tmjd, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=external-dns-7cb4dc559b-6tmjd, serverless_id=int-eu-west-1 Value:0xc022c5abb0} RES:{Var:RES Labels:pod=external-dns-7cb4dc559b-6tmjd, serverless_id=int-eu-west-1 Value:0xc022c5abe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19040068s EvaluationString:[ var='A' labels={pod=external-dns-7cb4dc559b-6tmjd, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=external-dns-7cb4dc559b-6tmjd, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=external-service-controller-manager-664fd867c9-vsjr9, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=external-service-controller-manager-664fd867c9-vsjr9, serverless_id=int-eu-west-1 Value:0xc022c5aee0} RES:{Var:RES Labels:pod=external-service-controller-manager-664fd867c9-vsjr9, serverless_id=int-eu-west-1 Value:0xc022c5af10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190402892s EvaluationString:[ var='A' labels={pod=external-service-controller-manager-664fd867c9-vsjr9, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=external-service-controller-manager-664fd867c9-vsjr9, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=falco-2qbbj, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=falco-2qbbj, serverless_id=int-eu-west-1 Value:0xc022c5af90} RES:{Var:RES Labels:pod=falco-2qbbj, serverless_id=int-eu-west-1 Value:0xc022c5afd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190405676s EvaluationString:[ var='A' labels={pod=falco-2qbbj, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=falco-2qbbj, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=falco-6jn86, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=falco-6jn86, serverless_id=int-eu-west-1 Value:0xc022c5b0b0} RES:{Var:RES Labels:pod=falco-6jn86, serverless_id=int-eu-west-1 Value:0xc022c5b050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190408034s EvaluationString:[ var='A' labels={pod=falco-6jn86, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=falco-6jn86, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=falco-cm5db, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=falco-cm5db, serverless_id=int-eu-west-1 Value:0xc022c5b260} RES:{Var:RES Labels:pod=falco-cm5db, serverless_id=int-eu-west-1 Value:0xc022c5b2a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190410243s EvaluationString:[ var='A' labels={pod=falco-cm5db, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=falco-cm5db, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=falco-k8s-764c49597-cntbq, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=falco-k8s-764c49597-cntbq, serverless_id=int-eu-west-1 Value:0xc022c5b300} RES:{Var:RES Labels:pod=falco-k8s-764c49597-cntbq, serverless_id=int-eu-west-1 Value:0xc022c5b330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190412897s EvaluationString:[ var='A' labels={pod=falco-k8s-764c49597-cntbq, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=falco-k8s-764c49597-cntbq, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=falco-m4sf4, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=falco-m4sf4, serverless_id=int-eu-west-1 Value:0xc022c5b540} RES:{Var:RES Labels:pod=falco-m4sf4, serverless_id=int-eu-west-1 Value:0xc022c5b580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190415686s EvaluationString:[ var='A' labels={pod=falco-m4sf4, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=falco-m4sf4, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=falco-qqsv9, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=falco-qqsv9, serverless_id=int-eu-west-1 Value:0xc022c5b630} RES:{Var:RES Labels:pod=falco-qqsv9, serverless_id=int-eu-west-1 Value:0xc022c5b710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190417944s EvaluationString:[ var='A' labels={pod=falco-qqsv9, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=falco-qqsv9, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=falco-sidekick-5968b4c5c-thjk2, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=falco-sidekick-5968b4c5c-thjk2, serverless_id=int-eu-west-1 Value:0xc022c5b7d0} RES:{Var:RES Labels:pod=falco-sidekick-5968b4c5c-thjk2, serverless_id=int-eu-west-1 Value:0xc022c5b7a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190421755s EvaluationString:[ var='A' labels={pod=falco-sidekick-5968b4c5c-thjk2, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=falco-sidekick-5968b4c5c-thjk2, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=ingress-nginx-controller-76477854cc-sfzqx, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=ingress-nginx-controller-76477854cc-sfzqx, serverless_id=int-eu-west-1 Value:0xc022c5b8a0} RES:{Var:RES Labels:pod=ingress-nginx-controller-76477854cc-sfzqx, serverless_id=int-eu-west-1 Value:0xc022c5b8e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190425586s EvaluationString:[ var='A' labels={pod=ingress-nginx-controller-76477854cc-sfzqx, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=ingress-nginx-controller-76477854cc-sfzqx, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=ingress-nginx-controller-76477854cc-zzqqx, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=ingress-nginx-controller-76477854cc-zzqqx, serverless_id=int-eu-west-1 Value:0xc022c5b970} RES:{Var:RES Labels:pod=ingress-nginx-controller-76477854cc-zzqqx, serverless_id=int-eu-west-1 Value:0xc022c5b9a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190429939s EvaluationString:[ var='A' labels={pod=ingress-nginx-controller-76477854cc-zzqqx, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=ingress-nginx-controller-76477854cc-zzqqx, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=kube-proxy-6ll6t, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=kube-proxy-6ll6t, serverless_id=int-eu-west-1 Value:0xc022c5ba20} RES:{Var:RES Labels:pod=kube-proxy-6ll6t, serverless_id=int-eu-west-1 Value:0xc022c5ba60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190434457s EvaluationString:[ var='A' labels={pod=kube-proxy-6ll6t, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=kube-proxy-6ll6t, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=kube-proxy-7jggm, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=kube-proxy-7jggm, serverless_id=int-eu-west-1 Value:0xc022c5bae0} RES:{Var:RES Labels:pod=kube-proxy-7jggm, serverless_id=int-eu-west-1 Value:0xc022c5bb20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19043809s EvaluationString:[ var='A' labels={pod=kube-proxy-7jggm, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=kube-proxy-7jggm, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=kube-proxy-nb5sm, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=kube-proxy-nb5sm, serverless_id=int-eu-west-1 Value:0xc022c5bba0} RES:{Var:RES Labels:pod=kube-proxy-nb5sm, serverless_id=int-eu-west-1 Value:0xc022c5bbe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190441497s EvaluationString:[ var='A' labels={pod=kube-proxy-nb5sm, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=kube-proxy-nb5sm, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=kube-proxy-sptn2, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=kube-proxy-sptn2, serverless_id=int-eu-west-1 Value:0xc022c5bc60} RES:{Var:RES Labels:pod=kube-proxy-sptn2, serverless_id=int-eu-west-1 Value:0xc022c5bcb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190444926s EvaluationString:[ var='A' labels={pod=kube-proxy-sptn2, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=kube-proxy-sptn2, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=kube-proxy-ss9q6, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=kube-proxy-ss9q6, serverless_id=int-eu-west-1 Value:0xc028f76210} RES:{Var:RES Labels:pod=kube-proxy-ss9q6, serverless_id=int-eu-west-1 Value:0xc028f767d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190448573s EvaluationString:[ var='A' labels={pod=kube-proxy-ss9q6, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=kube-proxy-ss9q6, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=logging-fluent-bit-6c4j9, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=logging-fluent-bit-6c4j9, serverless_id=int-eu-west-1 Value:0xc028f76830} RES:{Var:RES Labels:pod=logging-fluent-bit-6c4j9, serverless_id=int-eu-west-1 Value:0xc028f76860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190452225s EvaluationString:[ var='A' labels={pod=logging-fluent-bit-6c4j9, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=logging-fluent-bit-6c4j9, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=logging-fluent-bit-6w9sj, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=logging-fluent-bit-6w9sj, serverless_id=int-eu-west-1 Value:0xc028f768c0} RES:{Var:RES Labels:pod=logging-fluent-bit-6w9sj, serverless_id=int-eu-west-1 Value:0xc028f768f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190455799s EvaluationString:[ var='A' labels={pod=logging-fluent-bit-6w9sj, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=logging-fluent-bit-6w9sj, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=logging-fluent-bit-l79km, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=logging-fluent-bit-l79km, serverless_id=int-eu-west-1 Value:0xc028f76960} RES:{Var:RES Labels:pod=logging-fluent-bit-l79km, serverless_id=int-eu-west-1 Value:0xc028f76990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190459458s EvaluationString:[ var='A' labels={pod=logging-fluent-bit-l79km, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=logging-fluent-bit-l79km, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=logging-fluent-bit-p76hm, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=logging-fluent-bit-p76hm, serverless_id=int-eu-west-1 Value:0xc028f76a00} RES:{Var:RES Labels:pod=logging-fluent-bit-p76hm, serverless_id=int-eu-west-1 Value:0xc028f76a30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190464994s EvaluationString:[ var='A' labels={pod=logging-fluent-bit-p76hm, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=logging-fluent-bit-p76hm, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=logging-fluent-bit-spb8n, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=logging-fluent-bit-spb8n, serverless_id=int-eu-west-1 Value:0xc028f76a90} RES:{Var:RES Labels:pod=logging-fluent-bit-spb8n, serverless_id=int-eu-west-1 Value:0xc028f76ac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190472409s EvaluationString:[ var='A' labels={pod=logging-fluent-bit-spb8n, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=logging-fluent-bit-spb8n, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=metering-collector-65488fd877-mjnnb, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=metering-collector-65488fd877-mjnnb, serverless_id=int-eu-west-1 Value:0xc028f76b20} RES:{Var:RES Labels:pod=metering-collector-65488fd877-mjnnb, serverless_id=int-eu-west-1 Value:0xc028f76b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190476124s EvaluationString:[ var='A' labels={pod=metering-collector-65488fd877-mjnnb, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=metering-collector-65488fd877-mjnnb, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=node-debugger-ip-10-254-15-51.eu-west-1.compute.internal-bbtbc, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=node-debugger-ip-10-254-15-51.eu-west-1.compute.internal-bbtbc, serverless_id=int-eu-west-1 Value:0xc028f76d70} RES:{Var:RES Labels:pod=node-debugger-ip-10-254-15-51.eu-west-1.compute.internal-bbtbc, serverless_id=int-eu-west-1 Value:0xc028f76da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190481284s EvaluationString:[ var='A' labels={pod=node-debugger-ip-10-254-15-51.eu-west-1.compute.internal-bbtbc, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=node-debugger-ip-10-254-15-51.eu-west-1.compute.internal-bbtbc, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-kube-prometheus-operator-6dfdbb5f7d-ft498, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-kube-prometheus-operator-6dfdbb5f7d-ft498, serverless_id=int-eu-west-1 Value:0xc028f76e20} RES:{Var:RES Labels:pod=prometheus-kube-prometheus-operator-6dfdbb5f7d-ft498, serverless_id=int-eu-west-1 Value:0xc028f76e50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190485808s EvaluationString:[ var='A' labels={pod=prometheus-kube-prometheus-operator-6dfdbb5f7d-ft498, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-kube-prometheus-operator-6dfdbb5f7d-ft498, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-kube-state-metrics-66969c7df7-dj2mw, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-kube-state-metrics-66969c7df7-dj2mw, serverless_id=int-eu-west-1 Value:0xc028f76eb0} RES:{Var:RES Labels:pod=prometheus-kube-state-metrics-66969c7df7-dj2mw, serverless_id=int-eu-west-1 Value:0xc028f76ee0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190491011s EvaluationString:[ var='A' labels={pod=prometheus-kube-state-metrics-66969c7df7-dj2mw, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-kube-state-metrics-66969c7df7-dj2mw, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-prometheus-kube-prometheus-prometheus-0, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-prometheus-kube-prometheus-prometheus-0, serverless_id=int-eu-west-1 Value:0xc028f76f50} RES:{Var:RES Labels:pod=prometheus-prometheus-kube-prometheus-prometheus-0, serverless_id=int-eu-west-1 Value:0xc028f76f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19049592s EvaluationString:[ var='A' labels={pod=prometheus-prometheus-kube-prometheus-prometheus-0, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-prometheus-kube-prometheus-prometheus-0, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-prometheus-node-exporter-8mhzq, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-prometheus-node-exporter-8mhzq, serverless_id=int-eu-west-1 Value:0xc028f76fe0} RES:{Var:RES Labels:pod=prometheus-prometheus-node-exporter-8mhzq, serverless_id=int-eu-west-1 Value:0xc028f77010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190500545s EvaluationString:[ var='A' labels={pod=prometheus-prometheus-node-exporter-8mhzq, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-prometheus-node-exporter-8mhzq, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-prometheus-node-exporter-cgrpz, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-prometheus-node-exporter-cgrpz, serverless_id=int-eu-west-1 Value:0xc028f77070} RES:{Var:RES Labels:pod=prometheus-prometheus-node-exporter-cgrpz, serverless_id=int-eu-west-1 Value:0xc028f770a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190506184s EvaluationString:[ var='A' labels={pod=prometheus-prometheus-node-exporter-cgrpz, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-prometheus-node-exporter-cgrpz, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-prometheus-node-exporter-f7hqj, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-prometheus-node-exporter-f7hqj, serverless_id=int-eu-west-1 Value:0xc028f77100} RES:{Var:RES Labels:pod=prometheus-prometheus-node-exporter-f7hqj, serverless_id=int-eu-west-1 Value:0xc028f77130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190512084s EvaluationString:[ var='A' labels={pod=prometheus-prometheus-node-exporter-f7hqj, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-prometheus-node-exporter-f7hqj, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-prometheus-node-exporter-m2kvn, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-prometheus-node-exporter-m2kvn, serverless_id=int-eu-west-1 Value:0xc028f77270} RES:{Var:RES Labels:pod=prometheus-prometheus-node-exporter-m2kvn, serverless_id=int-eu-west-1 Value:0xc028f775b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190532042s EvaluationString:[ var='A' labels={pod=prometheus-prometheus-node-exporter-m2kvn, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-prometheus-node-exporter-m2kvn, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=prometheus-prometheus-node-exporter-tknqg, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=prometheus-prometheus-node-exporter-tknqg, serverless_id=int-eu-west-1 Value:0xc028f77610} RES:{Var:RES Labels:pod=prometheus-prometheus-node-exporter-tknqg, serverless_id=int-eu-west-1 Value:0xc028f77640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190536156s EvaluationString:[ var='A' labels={pod=prometheus-prometheus-node-exporter-tknqg, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=prometheus-prometheus-node-exporter-tknqg, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=proxy-0, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=proxy-0, serverless_id=int-eu-west-1 Value:0xc028f776b0} RES:{Var:RES Labels:pod=proxy-0, serverless_id=int-eu-west-1 Value:0xc028f77720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19054126s EvaluationString:[ var='A' labels={pod=proxy-0, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=proxy-0, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=proxy-1, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=proxy-1, serverless_id=int-eu-west-1 Value:0xc028f777d0} RES:{Var:RES Labels:pod=proxy-1, serverless_id=int-eu-west-1 Value:0xc028f77810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.19054578s EvaluationString:[ var='A' labels={pod=proxy-1, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=proxy-1, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=proxy-2, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=proxy-2, serverless_id=int-eu-west-1 Value:0xc028f778a8} RES:{Var:RES Labels:pod=proxy-2, serverless_id=int-eu-west-1 Value:0xc028f778e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190550677s EvaluationString:[ var='A' labels={pod=proxy-2, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=proxy-2, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=shell, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=shell, serverless_id=int-eu-west-1 Value:0xc028f77950} RES:{Var:RES Labels:pod=shell, serverless_id=int-eu-west-1 Value:0xc028f77968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190555743s EvaluationString:[ var='A' labels={pod=shell, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=shell, serverless_id=int-eu-west-1} value=0 ]} {Instance:pod=temporal-bus-5d94bf6dd8-x7h98, serverless_id=int-eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:pod=temporal-bus-5d94bf6dd8-x7h98, serverless_id=int-eu-west-1 Value:0xc028f77a20} RES:{Var:RES Labels:pod=temporal-bus-5d94bf6dd8-x7h98, serverless_id=int-eu-west-1 Value:0xc028f779e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.190560253s EvaluationString:[ var='A' labels={pod=temporal-bus-5d94bf6dd8-x7h98, serverless_id=int-eu-west-1} value=0 ], [ var='RES' labels={pod=temporal-bus-5d94bf6dd8-x7h98, serverless_id=int-eu-west-1} value=0 ]}]" duration=39.694652ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.192794676Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager.persist user=700399 slug=demo19344 t=2024-05-29T13:44:15.192748241Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=66.297819ms
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.192686159Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.192596736Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.192602152Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.192586211Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.192551458Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.192528802Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.192288366Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=nuuscn1afollnetnat1001.foll.gcp.hclsw.internal, job=Non-prod-follnetnat1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.192135332Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=savage-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentEEA9F" t=2024-05-29T13:44:15.192111018Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=538962 slug=innovaciondigitalbcs t=2024-05-29T13:44:15.192030347Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.318389ms
+level=debug ts=2024-05-29T13:44:15.192063321Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=nuuscn1afolllvmora1001.foll.gcp.hclsw.internal, job=Non-prod-folllvmora1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.192085697Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollspaweb2002.foll.gcp.hclsw.internal, job=Dev-follspaweb2002-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191961068Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=PIA, environment=production, instance=45.133.172.66:9998, ip=45.133.172.66, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-manchester.crt, role=vpn, server=manchester421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.191824575Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191699971Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=94289 slug=translinegruppegmbh instance= t=2024-05-29T13:44:15.191667365Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191670746Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=sandbox-justice-dev, game_interval_time=12, game_namespace=Accelbytetesting, game_template=gameSessionPersistentTest-711AB67C49CBA4BA668CD9873165CC94" t=2024-05-29T13:44:15.191645843Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=94289 slug=translinegruppegmbh t=2024-05-29T13:44:15.191604425Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191633006Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.19160417Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191579645Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191498331Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=Dev-follnalapp1002-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191454385Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=Dev-follnalapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191413137Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=194539 slug=sharris instance= t=2024-05-29T13:44:15.191384277Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:15.191318712Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollintapp3001.foll.gcp.hclsw.internal, job=Dev-follintapp3001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191346253Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollintapp2001.foll.gcp.hclsw.internal, job=Dev-follintapp2001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191320769Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda2, fstype=xfs, instance=duuscn1afollcmlapp4001.foll.gcp.hclsw.internal, job=Dev-follcmlapp4001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191274222Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=xfs, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191235913Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=xfs, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191219069Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=172772 slug=ppbtradingtribe instance="QueueName=gem-failed-event-mapping-queue" t=2024-05-29T13:44:15.191210098Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:15.191161935Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=xfs, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191166979Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=xfs, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.19115557Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=sandbox-justice-dev, game_interval_time=12, game_namespace=Accelbytetesting, game_template=UETestPersistent61DAF" t=2024-05-29T13:44:15.191192433Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=xfs, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.191128667Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=xfs, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.19111779Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=suuscn1bfollspaweb1002.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1002-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.191089883Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=suuscn1bfollnalapp1002.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1002-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.19106257Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.191105998Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=PIA, environment=production, instance=45.133.172.34:9998, ip=45.133.172.34, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-manchester.crt, role=vpn, server=manchester420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.191062981Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.191024128Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=suuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.191006978Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.191002242Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=suuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Pre-prod-follsnaapp1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190979946Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=sandbox-justice-dev, game_interval_time=12, game_namespace=Accelbytetesting, game_template=UETestPersistent60345" t=2024-05-29T13:44:15.190922466Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.190837403Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190761061Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=159532 slug=getfabric t=2024-05-29T13:44:15.190667282Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=29.270402ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=PIA, environment=production, instance=45.133.172.34:9998, ip=45.133.172.34, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=manchester420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.190713879Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=sandbox-justice-dev, game_interval_time=12, game_namespace=Accelbytetesting, game_template=UETestPersistent2639E" t=2024-05-29T13:44:15.190665198Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190667412Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190586177Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190530432Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190519724Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=nuuscn1afollnetutl1001.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190430943Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=nuuscn1afollnetnat1001.foll.gcp.hclsw.internal, job=Non-prod-follnetnat1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.1904023Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=nuuscn1afolllvmora1001.foll.gcp.hclsw.internal, job=Non-prod-folllvmora1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.19037452Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.190309852Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:15.190238055Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.190264929Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollspaweb2001.foll.gcp.hclsw.internal, job=Dev-follspaweb2001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190261833Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=Dev-follspaweb1002-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190225179Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Dev-follspaweb1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190207837Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Dev-follspaweb1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190198555Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollsnaapp3001.foll.gcp.hclsw.internal, job=Dev-follsnaapp3001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190175485Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollsnaapp2001.foll.gcp.hclsw.internal, job=Dev-follsnaapp2001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190156444Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190118933Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190091972Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.190005493Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.189982267Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=334408 slug=voltagrid t=2024-05-29T13:44:15.189892751Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=334408 slug=voltagrid instance= t=2024-05-29T13:44:15.189880991Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.189953384Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.189890922Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.189921506Z caller=remote_instance_store.go:51 user=334408 slug=voltagrid msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.18991577Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=334408 slug=voltagrid version=67 fingerprint=261d6d091396ed32 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.189781073Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.189520554s EvaluationString:}]" duration=32.806389ms
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollintapp2001.foll.gcp.hclsw.internal, job=Dev-follintapp2001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.189732486Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.189678797Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/sda1, fstype=vfat, instance=duuscn1afollintapp2001.foll.gcp.hclsw.internal, job=Dev-follintapp2001-Host-VM, mountpoint=/boot/efi" t=2024-05-29T13:44:15.189718219Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.189650147Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg_u00-lv_u00, fstype=xfs, instance=quuscn1afollwcsdbs1002.foll.gcp.hclsw.internal, job=QA-follwcsdbs1002-DB-Host-VM, mountpoint=/u00" t=2024-05-29T13:44:15.189589074Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.189507904Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.189496275Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:15.189472189Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.813633ms
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg_u00-lv_u00, fstype=xfs, instance=duuscn1afollwcsdbs1001.foll.gcp.hclsw.internal, job=Dev-DB-Host-VM, mountpoint=/u00" t=2024-05-29T13:44:15.189559514Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg_main-lv_root, fstype=xfs, instance=quuscn1afollwcsdbs1002.foll.gcp.hclsw.internal, job=QA-follwcsdbs1002-DB-Host-VM, mountpoint=/" t=2024-05-29T13:44:15.18951198Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.189386026Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollspaweb1002.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.189443139Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollspaweb1002.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.189424204Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollspaweb1002.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.189407198Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollspaweb1002.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1002-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.189359261Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollnalapp1002.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.189287419Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance= t=2024-05-29T13:44:15.189301177Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollnalapp1002.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.189243145Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=538037 slug=drivewealth version=129 fingerprint=0dc2a963c560bc84 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.189194807Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc03e517f38} B:{Var:B Labels: Value:0xc03e517f28} C:{Var:C Labels: Value:0xc03e517f30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.18889811s EvaluationString:[ var='A' labels={} value=41.60747034358898 ], [ var='B' labels={} value=41.60747034358898 ], [ var='C' labels={} value=0 ]}]" duration=26.280488ms
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollnalapp1002.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1002-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.18919847Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollnalapp1002.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1002-Host-VM, mountpoint=/data"
t=2024-05-29T13:44:15.189189304Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1bfollcmlapp1002.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.189127785Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=infinitemana-justice-dev, game_interval_time=12, game_namespace=ExpeditionsStaging, game_template=GeneralChatChannel" t=2024-05-29T13:44:15.189054287Z level=debug msg="Changing state" previous_state=Pending next_state=Normal previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:44:10Z +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.189009648Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Pre-prod-follspaweb1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.188926679Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=PIA, environment=production, instance=45.133.172.226:9998, ip=45.133.172.226, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=manchester426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.188910289Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Pre-prod-follsnaapp1001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.188885955Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.188701984Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.188803083Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Pre-prod-follsnaapp1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.188807822Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Pre-prod-follsnaapp1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.188798087Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Pre-prod-follsnaapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.18875743Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, 
origin=volterra-infra-vm" t=2024-05-29T13:44:15.188731401Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm" t=2024-05-29T13:44:15.188724297Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Pre-prod-follnalapp1003-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.188654046Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:15.188559132Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=88efa6f58ff1b319 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.188430081Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.188106982s EvaluationString:}]" duration=178.205275ms +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:15.18848974Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:15.188479847Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=genun-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent9EC1B" t=2024-05-29T13:44:15.188464231Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.188404517Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follnalapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.188439246Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=PIA, environment=production, instance=45.133.172.194:9998, ip=45.133.172.194, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-manchester.crt, role=vpn, server=manchester425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.188435185Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm" t=2024-05-29T13:44:15.18842057Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=642786 slug=sophoscomnsg t=2024-05-29T13:44:15.188303799Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.918949ms +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollmqbapp1001.foll.gcp.hclsw.internal, 
job=Pre-Prod-Follmqbapp1001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.188312742Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follmqbapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.188284787Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm" t=2024-05-29T13:44:15.188295074Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follmqbapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.188270632Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.188249483Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=432323 slug=lithic instance="LoadBalancer=app/authorizer-live-lb/c3a147816d0d11e6, TargetGroup=targetgroup/authorizer-live-3000-tg/7e483081d1c0c195" t=2024-05-29T13:44:15.188269198Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm" t=2024-05-29T13:44:15.188229648Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follmqbapp1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.188221719Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.188185728Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.188210542Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.188195053Z level=debug msg="Saving alert states" count=84 max_state_save_concurrency=1 +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm" t=2024-05-29T13:44:15.188192083Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=genpop-justice-prod, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentBB733" t=2024-05-29T13:44:15.188097638Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.18807479Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm" t=2024-05-29T13:44:15.188058245Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm" t=2024-05-29T13:44:15.188050868Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.188004446Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.187977581Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187954644Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187943865Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.187873044Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node28 - 35.213.132.28, job=node-exporter, metrics_node_id=29, node_id=28" t=2024-05-29T13:44:15.187842503Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187828903Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.187776711Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.187741591Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.187757886Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.187734709Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node27 - 35.212.108.83, job=node-exporter, metrics_node_id=28, node_id=27" t=2024-05-29T13:44:15.18768456Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.187671581Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.187649536Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.187673372Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=M247, environment=production, instance=89.238.137.43:9998, ip=89.238.137.43, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-manchester.crt, role=vpn, server=manchester404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.18762638Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187648277Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=467576 slug=clmistaging instance= t=2024-05-29T13:44:15.18761829Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.187608516Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187615508Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=467576 slug=clmistaging t=2024-05-29T13:44:15.187571401Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.187592323Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.187545418Z caller=remote_instance_store.go:51 user=407315 slug=ppcp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node26 - 35.214.1.71, job=node-exporter, metrics_node_id=27, node_id=26" t=2024-05-29T13:44:15.187537563Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=407315 slug=ppcp t=2024-05-29T13:44:15.18750744Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=407315 slug=ppcp instance="DBInstanceIdentifier=paperpile-pg" t=2024-05-29T13:44:15.187498495Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.187502741Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.187509719Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=407315 slug=ppcp instance="DBInstanceIdentifier=airbyte-pg" t=2024-05-29T13:44:15.187458944Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=407315 slug=ppcp instance="DBInstanceIdentifier=airbyte-pg" t=2024-05-29T13:44:15.18744657Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=407315 slug=ppcp t=2024-05-29T13:44:15.187405587Z level=debug msg="State manager processing evaluation results" resultCount=2 +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.187483803Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.187442211Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node25 - 160.202.129.143, job=node-exporter, metrics_node_id=26, node_id=25" t=2024-05-29T13:44:15.187387805Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.187427993Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=407315 slug=ppcp version=6 fingerprint=20283ccd476a1373 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.187316705Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=airbyte-pg State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=airbyte-pg Value:0xc0e2b7ab18} C:{Var:C Labels:DBInstanceIdentifier=airbyte-pg Value:0xc0e2b7ab40} D:{Var:D Labels:DBInstanceIdentifier=airbyte-pg Value:0xc0e2b7ab10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.186909135s EvaluationString:[ var='B' labels={DBInstanceIdentifier=airbyte-pg} value=7.8790979584e+10 ], [ var='C' labels={DBInstanceIdentifier=airbyte-pg} value=78.790979584 ], [ var='D' labels={DBInstanceIdentifier=airbyte-pg} value=0 ]} {Instance:DBInstanceIdentifier=paperpile-pg State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=paperpile-pg Value:0xc0e2b7ac10} C:{Var:C Labels:DBInstanceIdentifier=paperpile-pg Value:0xc0e2b7ac18} D:{Var:D Labels:DBInstanceIdentifier=paperpile-pg Value:0xc0e2b7ac40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.18692159s EvaluationString:[ var='B' labels={DBInstanceIdentifier=paperpile-pg} value=1.49447471104e+11 ], [ var='C' labels={DBInstanceIdentifier=paperpile-pg} value=149.447471104 ], [ var='D' 
labels={DBInstanceIdentifier=paperpile-pg} value=0 ]}]" duration=52.806698ms +level=debug ts=2024-05-29T13:44:15.187362387Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.1873526Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm" t=2024-05-29T13:44:15.18740456Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187392748Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=334045 slug=aireye instance= t=2024-05-29T13:44:15.187403536Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.187348551Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.187363146Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.187317562Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=334045 slug=aireye t=2024-05-29T13:44:15.187298627Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.18728653Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:15.187247517Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.452275ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=M247, environment=production, instance=89.238.137.43:9998, ip=89.238.137.43, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=manchester404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.187301201Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.187252649Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.549308ms +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187234102Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export" 
t=2024-05-29T13:44:15.187250014Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.187240315Z level=debug msg="State manager processing evaluation results" resultCount=5 +logger=ngalert.state.manager user=698963 slug=lemonade instance="app=chewy-gateway-worker, pod=chewy-gateway-worker-7b487dcb66-r2m69" t=2024-05-29T13:44:15.187214037Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm" t=2024-05-29T13:44:15.187199536Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.187039252Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm" t=2024-05-29T13:44:15.186965681Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.187007972Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.186998285Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.186867777Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=M247, environment=production, instance=89.238.137.42:9998, ip=89.238.137.42, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-manchester.crt, role=vpn, server=manchester403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.186936253Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.186941523Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node22 - 35.207.208.247, job=node-exporter, metrics_node_id=23, node_id=22" t=2024-05-29T13:44:15.186899137Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node22 - 35.207.208.247, job=node-exporter, metrics_node_id=23, node_id=22" t=2024-05-29T13:44:15.186888999Z level=debug msg="Setting next state" 
handler=resultNormal +logger=ngalert.scheduler user=343338 slug=f5sdc version=106 fingerprint=6cb14f7dc57186e5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.185539788Z level=debug msg="Alert rule evaluated" results="[{Instance:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm Value:0xc01bc31b20} B:{Var:B Labels:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm Value:0xc01bc31b60} C:{Var:C Labels:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm Value:0xc01bc31ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.182941859s EvaluationString:[ var='A' labels={datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm} value=23 ], [ var='B' labels={datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm} value=23 ], [ var='C' labels={datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm Value:0xc01bc31bd8} B:{Var:B Labels:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm Value:0xc01bc31d28} C:{Var:C Labels:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm Value:0xc01bc31d68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.182960748s EvaluationString:[ var='A' labels={datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm} value=24 ], [ var='B' labels={datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm} value=24 ], [ var='C' labels={datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm Value:0xc01bc31e00} B:{Var:B Labels:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm Value:0xc022464030} C:{Var:C Labels:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm Value:0xc022464078}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.182970971s EvaluationString:[ var='A' labels={datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm} value=1 ], [ var='B' labels={datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm} value=1 ], [ var='C' labels={datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm Value:0xc022464170} B:{Var:B 
Labels:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm Value:0xc0224641b8} C:{Var:C Labels:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm Value:0xc022464208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.182981723s EvaluationString:[ var='A' labels={datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm} value=9 ], [ var='B' labels={datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm} value=9 ], [ var='C' labels={datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm Value:0xc0224642f0} B:{Var:B Labels:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm Value:0xc022464330} C:{Var:C Labels:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm Value:0xc0224642b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.182990793s EvaluationString:[ var='A' labels={datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm} value=30 ], [ var='B' labels={datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm} value=30 ], [ var='C' labels={datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm Value:0xc0224643d8} B:{Var:B Labels:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm Value:0xc022464420} C:{Var:C Labels:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm Value:0xc022464478}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.182998489s EvaluationString:[ var='A' labels={datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm} value=22 ], [ var='B' labels={datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm} value=22 ], [ var='C' labels={datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm Value:0xc022464500} B:{Var:B Labels:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm Value:0xc022464550} C:{Var:C Labels:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm Value:0xc0224645c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183003623s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm} value=39 ], [ var='B' labels={datacenter=dx1.dxb, group=directory, 
instance=directory-01.dx1.dxb, origin=volterra-infra-vm} value=39 ], [ var='C' labels={datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm Value:0xc0224646e8} B:{Var:B Labels:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm Value:0xc022464640} C:{Var:C Labels:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm Value:0xc0224646a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183011044s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm} value=41 ], [ var='B' labels={datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm} value=41 ], [ var='C' labels={datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm Value:0xc022464818} B:{Var:B Labels:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm Value:0xc022464798} C:{Var:C Labels:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm Value:0xc0224647d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183015967s EvaluationString:[ var='A' labels={datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm} value=44 ], [ var='B' labels={datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm} value=44 ], [ var='C' labels={datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm Value:0xc022464908} B:{Var:B Labels:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm Value:0xc022464960} C:{Var:C Labels:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm Value:0xc0224648b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183021526s EvaluationString:[ var='A' labels={datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm} value=48 ], [ var='B' labels={datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm} value=48 ], [ var='C' labels={datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm Value:0xc022464ab8} B:{Var:B Labels:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, 
origin=volterra-infra-vm Value:0xc022464a10} C:{Var:C Labels:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm Value:0xc022464a60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183028484s EvaluationString:[ var='A' labels={datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='B' labels={datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='C' labels={datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm Value:0xc022464b50} B:{Var:B Labels:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm Value:0xc022464b90} C:{Var:C Labels:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm Value:0xc022464bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183033431s EvaluationString:[ var='A' labels={datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='B' labels={datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='C' labels={datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm Value:0xc022464c80} B:{Var:B Labels:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm Value:0xc022464cd0} C:{Var:C Labels:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm Value:0xc022464d18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183038961s EvaluationString:[ var='A' labels={datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='B' labels={datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='C' labels={datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm Value:0xc022464dd0} B:{Var:B Labels:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm Value:0xc022464e38} C:{Var:C Labels:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm Value:0xc022464e90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183043963s EvaluationString:[ var='A' labels={datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='B' labels={datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='C' labels={datacenter=ld6.lon, 
group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm Value:0xc022464fd8} B:{Var:B Labels:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm Value:0xc022464f30} C:{Var:C Labels:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm Value:0xc022464f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183048661s EvaluationString:[ var='A' labels={datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm} value=58 ], [ var='B' labels={datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm} value=58 ], [ var='C' labels={datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm Value:0xc022465080} B:{Var:B Labels:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm Value:0xc0224650d0} C:{Var:C Labels:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm Value:0xc022465110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183055926s EvaluationString:[ var='A' labels={datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm} value=46 ], [ var='B' labels={datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm} value=46 ], [ var='C' labels={datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm Value:0xc0224651c0} B:{Var:B Labels:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm Value:0xc022465210} C:{Var:C Labels:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm Value:0xc022465260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183060624s EvaluationString:[ var='A' labels={datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm} value=51 ], [ var='B' labels={datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm} value=51 ], [ var='C' labels={datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm Value:0xc022465300} B:{Var:B Labels:datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm Value:0xc022465340} C:{Var:C Labels:datacenter=mb2.mum, group=directory, 
instance=directory-02.mb2.mum, origin=volterra-infra-vm Value:0xc022465390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183065656s EvaluationString:[ var='A' labels={datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm} value=57 ], [ var='B' labels={datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm} value=57 ], [ var='C' labels={datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm Value:0xc0224654c8} B:{Var:B Labels:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm Value:0xc022465428} C:{Var:C Labels:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm Value:0xc022465478}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183071699s EvaluationString:[ var='A' labels={datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm} value=60 ], [ var='B' labels={datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm} value=60 ], [ var='C' labels={datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm Value:0xc0224655b8} B:{Var:B Labels:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm Value:0xc022465608} C:{Var:C Labels:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm Value:0xc022465560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183081042s EvaluationString:[ var='A' labels={datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm} value=19 ], [ var='B' labels={datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm} value=19 ], [ var='C' labels={datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm Value:0xc0224656c0} B:{Var:B Labels:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm Value:0xc022465730} C:{Var:C Labels:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm Value:0xc022465780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183089403s EvaluationString:[ var='A' labels={datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm} value=51 ], [ var='B' labels={datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm} value=51 ], [ var='C' labels={datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm} value=0 ]} 
{Instance:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm Value:0xc022465810} B:{Var:B Labels:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm Value:0xc022465860} C:{Var:C Labels:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm Value:0xc0224658e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183104479s EvaluationString:[ var='A' labels={datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm} value=62 ], [ var='B' labels={datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm} value=62 ], [ var='C' labels={datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm Value:0xc022465970} B:{Var:B Labels:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm Value:0xc0224659c0} C:{Var:C Labels:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm Value:0xc022465a10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.1831132s EvaluationString:[ var='A' labels={datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm} value=51 ], [ var='B' labels={datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm} value=51 ], [ var='C' labels={datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm Value:0xc022465b58} B:{Var:B Labels:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm Value:0xc022465aa0} C:{Var:C Labels:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm Value:0xc022465af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183122152s EvaluationString:[ var='A' labels={datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm} value=20 ], [ var='B' labels={datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm} value=20 ], [ var='C' labels={datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm Value:0xc022465c40} B:{Var:B Labels:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm Value:0xc022465c80} C:{Var:C Labels:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm 
Value:0xc022465bf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183130479s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm} value=8 ], [ var='B' labels={datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm} value=8 ], [ var='C' labels={datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm Value:0xc022465d50} B:{Var:B Labels:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm Value:0xc022465d90} C:{Var:C Labels:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm Value:0xc022465d10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183138779s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm} value=14 ], [ var='B' labels={datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm} value=14 ], [ var='C' labels={datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm Value:0xc022465ea0} B:{Var:B Labels:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm Value:0xc022465e20} C:{Var:C Labels:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm Value:0xc022465e60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183147916s EvaluationString:[ var='A' labels={datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm} value=6 ], [ var='B' labels={datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm} value=6 ], [ var='C' labels={datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm Value:0xc022465f28} B:{Var:B Labels:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm Value:0xc022465fa0} C:{Var:C Labels:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm Value:0xc022465fe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183155018s EvaluationString:[ var='A' labels={datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm} value=58 ], [ var='B' labels={datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm} value=58 ], [ var='C' labels={datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, 
origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm Value:0xc019b60298} B:{Var:B Labels:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm Value:0xc019b600f8} C:{Var:C Labels:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm Value:0xc019b60200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183159203s EvaluationString:[ var='A' labels={datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='B' labels={datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='C' labels={datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm Value:0xc019b604f0} B:{Var:B Labels:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm Value:0xc019b60620} C:{Var:C Labels:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm Value:0xc019b60440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183163847s EvaluationString:[ var='A' labels={datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='B' labels={datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='C' labels={datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm Value:0xc019b607c0} B:{Var:B Labels:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm Value:0xc019b60720} C:{Var:C Labels:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm Value:0xc019b60768}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183172511s EvaluationString:[ var='A' labels={datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm} value=29 ], [ var='B' labels={datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm} value=29 ], [ var='C' labels={datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm Value:0xc019b60e30} B:{Var:B Labels:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm Value:0xc019b60a18} C:{Var:C Labels:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm Value:0xc019b60d10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.18318208s EvaluationString:[ var='A' 
labels={datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm} value=43 ], [ var='B' labels={datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm} value=43 ], [ var='C' labels={datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm Value:0xc019b61340} B:{Var:B Labels:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm Value:0xc019b61428} C:{Var:C Labels:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm Value:0xc019b611e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183191077s EvaluationString:[ var='A' labels={datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm} value=26 ], [ var='B' labels={datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm} value=26 ], [ var='C' labels={datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm Value:0xc019b61578} B:{Var:B Labels:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm Value:0xc019b615b8} C:{Var:C Labels:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm Value:0xc019b61600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183199987s EvaluationString:[ var='A' labels={datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm} value=46 ], [ var='B' labels={datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm} value=46 ], [ var='C' labels={datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm Value:0xc019b61688} B:{Var:B Labels:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm Value:0xc019b616c8} C:{Var:C Labels:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm Value:0xc019b61710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.18320882s EvaluationString:[ var='A' labels={datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm} value=30 ], [ var='B' labels={datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm} value=30 ], [ var='C' labels={datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, group=directory, 
instance=directory-02.sif.che, origin=volterra-infra-vm Value:0xc019b61790} B:{Var:B Labels:datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm Value:0xc019b617d8} C:{Var:C Labels:datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm Value:0xc019b61820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183217087s EvaluationString:[ var='A' labels={datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm} value=13 ], [ var='B' labels={datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm} value=13 ], [ var='C' labels={datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm Value:0xc019b618a0} B:{Var:B Labels:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm Value:0xc019b618e0} C:{Var:C Labels:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm Value:0xc019b61928}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183227322s EvaluationString:[ var='A' labels={datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm} value=58 ], [ var='B' labels={datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm} value=58 ], [ var='C' labels={datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm Value:0xc019b619b8} B:{Var:B Labels:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm Value:0xc019b61a00} C:{Var:C Labels:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm Value:0xc019b61a48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183235605s EvaluationString:[ var='A' labels={datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm} value=15 ], [ var='B' labels={datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm} value=15 ], [ var='C' labels={datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm Value:0xc019b61b08} B:{Var:B Labels:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm Value:0xc019b61b60} C:{Var:C Labels:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm Value:0xc019b61ac8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183242857s EvaluationString:[ var='A' labels={datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm} value=18 ], 
[ var='B' labels={datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm} value=18 ], [ var='C' labels={datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm Value:0xc019b61cb0} B:{Var:B Labels:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm Value:0xc019b61c10} C:{Var:C Labels:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm Value:0xc019b61c70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.18324918s EvaluationString:[ var='A' labels={datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm} value=1 ], [ var='B' labels={datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm} value=1 ], [ var='C' labels={datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm Value:0xc019b61dc0} B:{Var:B Labels:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm Value:0xc019b61d40} C:{Var:C Labels:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm Value:0xc019b61d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183253945s EvaluationString:[ var='A' labels={datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm} value=1 ], [ var='B' labels={datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm} value=1 ], [ var='C' labels={datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm Value:0xc019b61e58} B:{Var:B Labels:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm Value:0xc019b61eb8} C:{Var:C Labels:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm Value:0xc019b61f38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183258342s EvaluationString:[ var='A' labels={datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm} value=28 ], [ var='B' labels={datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm} value=28 ], [ var='C' labels={datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm 
Value:0xc01c1b2368} B:{Var:B Labels:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm Value:0xc01c1b20a0} C:{Var:C Labels:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm Value:0xc01c1b2220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183263078s EvaluationString:[ var='A' labels={datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm} value=26 ], [ var='B' labels={datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm} value=26 ], [ var='C' labels={datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm Value:0xc01c1b2450} B:{Var:B Labels:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm Value:0xc01c1b2498} C:{Var:C Labels:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm Value:0xc01c1b24e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183271493s EvaluationString:[ var='A' labels={datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm} value=49 ], [ var='B' labels={datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm} value=49 ], [ var='C' labels={datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm Value:0xc01c1b2620} B:{Var:B Labels:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm Value:0xc01c1b2570} C:{Var:C Labels:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm Value:0xc01c1b25c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183279009s EvaluationString:[ var='A' labels={datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm} value=56 ], [ var='B' labels={datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm} value=56 ], [ var='C' labels={datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm Value:0xc01c1b2790} B:{Var:B Labels:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm Value:0xc01c1b2710} C:{Var:C Labels:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm Value:0xc01c1b2750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183287578s EvaluationString:[ var='A' labels={datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm} value=49 ], [ var='B' labels={datacenter=tr2.tor, group=directory, 
instance=directory-02.tr2.tor, origin=volterra-infra-vm} value=49 ], [ var='C' labels={datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm Value:0xc01c1b2888} B:{Var:B Labels:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm Value:0xc01c1b28d0} C:{Var:C Labels:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm Value:0xc01c1b2938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183295622s EvaluationString:[ var='A' labels={datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm} value=60 ], [ var='B' labels={datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm} value=60 ], [ var='C' labels={datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm Value:0xc01c1b2a28} B:{Var:B Labels:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm Value:0xc01c1b2a70} C:{Var:C Labels:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm Value:0xc01c1b2ab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183304122s EvaluationString:[ var='A' labels={datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm} value=3 ], [ var='B' labels={datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm} value=3 ], [ var='C' labels={datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm Value:0xc01c1b2bc0} B:{Var:B Labels:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm Value:0xc01c1b2b30} C:{Var:C Labels:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm Value:0xc01c1b2b70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183312533s EvaluationString:[ var='A' labels={datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm} value=3 ], [ var='B' labels={datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm} value=3 ], [ var='C' labels={datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm Value:0xc01c1b2c40} B:{Var:B Labels:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, 
origin=volterra-infra-vm Value:0xc01c1b2c88} C:{Var:C Labels:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm Value:0xc01c1b2ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.183321188s EvaluationString:[ var='A' labels={datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm} value=13 ], [ var='B' labels={datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm} value=13 ], [ var='C' labels={datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm} value=0 ]}]" duration=554.96106ms +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node21 - 35.214.93.21, job=node-exporter, metrics_node_id=22, node_id=21" t=2024-05-29T13:44:15.18672487Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.186637271Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.184.213, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:15.186569213Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.184.213, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:15.186560831Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.18653448Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.186545277Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node19 - 35.216.100.177, job=node-exporter, metrics_node_id=20, node_id=19" t=2024-05-29T13:44:15.186424399Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.186436817Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps" t=2024-05-29T13:44:15.186345348Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.18624812Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=firewalk-justice-dev2, game_interval_time=12, game_namespace=accelbytetesting, game_template=gameSessionPersistentTest-C66EFA2540FAA1A1B154879F00AF5264" t=2024-05-29T13:44:15.186198899Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.186174214Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.186148804Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.18613139Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.185903891Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.186058073Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.185996103Z level=debug msg="Setting next 
state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.185951807Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=195816 slug=esna t=2024-05-29T13:44:15.185887046Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.185912873Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=M247, environment=production, instance=89.238.137.41:9998, ip=89.238.137.41, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-manchester.crt, role=vpn, server=manchester402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.185915741Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.185841316Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.185898418Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.185862128Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=firewalk-justice-dev2, game_interval_time=12, game_namespace=accelbytetesting, game_template=gameSessionPersistentTest-53DBD6124C3DD5FFFD2DB0B557B2AA31" t=2024-05-29T13:44:15.18585095Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.185815402Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.185770837Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node15 - 69.67.150.140, job=node-exporter, metrics_node_id=16, node_id=15" t=2024-05-29T13:44:15.185720113Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.185745214Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.185566618Z caller=remote_instance_store.go:51 user=652809 slug=glassnode msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=falldamage-justice-dev, game_interval_time=12, game_namespace=alaraprime, game_template=lobby" t=2024-05-29T13:44:15.185568323Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.185620317Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.185593664Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=falldamage-justice-dev, game_interval_time=12, game_namespace=alaraprime, game_template=lobby" t=2024-05-29T13:44:15.185552569Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.185554503Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.185519781Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.185463168Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.185508024Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.185451716Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.18543224Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.185385863Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollutlbst1001.foll.gcp.hclsw.internal, job=Non-prod-bastion-Host-VM, mountpoint=/opt" 
t=2024-05-29T13:44:15.185358366Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=251760 slug=forgerock instance="datasource_uid=8CWVjso4z, ref_id=A" t=2024-05-29T13:44:15.18541018Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollutlbst1001.foll.gcp.hclsw.internal, job=Non-prod-bastion-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.185300194Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=251760 slug=forgerock version=64 fingerprint=9a9b7d8c8a20a2c9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.185307583Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=8CWVjso4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.185015908s EvaluationString:}]" duration=250.700797ms +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.185317872Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=falldamage-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentFA2B6" t=2024-05-29T13:44:15.18530959Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.185307958Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollutlbst1001.foll.gcp.hclsw.internal, job=Non-prod-bastion-Host-VM, mountpoint=/apps" t=2024-05-29T13:44:15.185239741Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=falldamage-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentFA2B6" t=2024-05-29T13:44:15.18529063Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollutlbld1001.foll.gcp.hclsw.internal, job=Non-prod-follutlbld1001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.185221432Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollutlbld1001.foll.gcp.hclsw.internal, job=Non-prod-follutlbld1001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.185184654Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollutlbld1001.foll.gcp.hclsw.internal, job=Non-prod-follutlbld1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.185124498Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollutlbld1001.foll.gcp.hclsw.internal, job=Non-prod-follutlbld1001-Host-VM, mountpoint=/apps" t=2024-05-29T13:44:15.185090723Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollnetutl1001.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.185003625Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb3002.foll.gcp.hclsw.internal, job=Dev-follspaweb3002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.184910864Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=falldamage-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentD9B10" t=2024-05-29T13:44:15.184998341Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=M247, environment=production, instance=89.238.137.40:9998, ip=89.238.137.40, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-manchester.crt, role=vpn, server=manchester401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.185022869Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node10 - 35.215.85.174, job=node-exporter, metrics_node_id=11, node_id=10" t=2024-05-29T13:44:15.184934551Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.184745258Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, datacenter=M247, environment=production, instance=89.238.137.40:9998, ip=89.238.137.40, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=manchester401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.18464553Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=841535 slug=elvaco01 instance= t=2024-05-29T13:44:15.184637234Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.184487895Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=841535 slug=elvaco01 t=2024-05-29T13:44:15.184587636Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.184643801Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" +level=debug ts=2024-05-29T13:44:15.184477514Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb3001.foll.gcp.hclsw.internal, job=Dev-follspaweb3001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.184638049Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=841535 slug=elvaco01 version=5 fingerprint=d18d6d85b7bf4cbb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.184507044Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc00779f7f8} B:{Var:B Labels: Value:0xc00779f800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.184145358s EvaluationString:[ var='A' labels={} value=51615 ], [ var='B' labels={} value=0 ]}]" duration=140.050875ms +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node07 - 35.213.161.121, job=node-exporter, metrics_node_id=8, node_id=7" t=2024-05-29T13:44:15.184539636Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb2002.foll.gcp.hclsw.internal, job=Dev-follspaweb2002-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.184428086Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb2002.foll.gcp.hclsw.internal, job=Dev-follspaweb2002-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.184417128Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb2001.foll.gcp.hclsw.internal, job=Dev-follspaweb2001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.18437112Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb2001.foll.gcp.hclsw.internal, job=Dev-follspaweb2001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.184344058Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=Dev-follspaweb1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.18427655Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=Dev-follspaweb1002-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.184264291Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=bungie-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentF29B1" t=2024-05-29T13:44:15.184138222Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction 
instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 35.217.56.255, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:15.184197971Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=Dev-follspaweb1002-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.184177056Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.184125214Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Madrid, country=Spain, datacenter=DataPacket, environment=production, instance=212.102.49.1:9998, ip=212.102.49.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/spain.crt, role=streaming-optimized, server=madrid402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.18409428Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Dev-follspaweb1001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.184103759Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node04 - 35.213.80.116, job=node-exporter, metrics_node_id=5, node_id=4" t=2024-05-29T13:44:15.184058635Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=Dev-follspaweb1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.184075499Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp3001.foll.gcp.hclsw.internal, job=Dev-follsnaapp3001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.184041901Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp3001.foll.gcp.hclsw.internal, job=Dev-follsnaapp3001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.184015079Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp3001.foll.gcp.hclsw.internal, job=Dev-follsnaapp3001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.184005312Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp3001.foll.gcp.hclsw.internal, job=Dev-follsnaapp3001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.18396317Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.183916465Z level=warn msg="Evaluation result 
contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp2001.foll.gcp.hclsw.internal, job=Dev-follsnaapp2001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.183934766Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=28776 slug=flotechnologies instance= t=2024-05-29T13:44:15.183890685Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp2001.foll.gcp.hclsw.internal, job=Dev-follsnaapp2001-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.183890301Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=bungie-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentCEC10" t=2024-05-29T13:44:15.183834045Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp2001.foll.gcp.hclsw.internal, job=Dev-follsnaapp2001-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.183870543Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=bungie-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentCEC10" t=2024-05-29T13:44:15.183816036Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/opt" t=2024-05-29T13:44:15.183807859Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.183699322Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.183604809Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export" t=2024-05-29T13:44:15.18356925Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, instance=vmblrpssl1:1936, job=HAProxy, origin_prometheus=vmblrpssl1" t=2024-05-29T13:44:15.183531421Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data" t=2024-05-29T13:44:15.183525759Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node00 - 35.209.23.215, job=node-exporter, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:15.183489001Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, instance=tdsalesforceproxy2:1936, job=HAProxy, origin_prometheus=tdsalesforceproxy2" t=2024-05-29T13:44:15.183450805Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=895137 slug=uid2 t=2024-05-29T13:44:15.183358464Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.036289ms +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.18344191Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home" t=2024-05-29T13:44:15.183428424Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, instance=tdpicrpssl2:1936, job=HAProxy, origin_prometheus=tdpicrpssl2" t=2024-05-29T13:44:15.183302234Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, instance=tdpicrpssl2:1936, job=HAProxy, origin_prometheus=tdpicrpssl2" t=2024-05-29T13:44:15.183288277Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=integration-docker, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 34.106.208.45, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:15.183220862Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=barb-justice-stage, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentB9440" t=2024-05-29T13:44:15.183220298Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893158 slug=cmfollnp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, 
fstype=ext4, instance=duuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=Dev-follnalapp1002-Host-VM, mountpoint=/opt Value:0xc03c8a6e70} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=Dev-follnalapp1002-Host-VM, mountpoint=/opt Value:0xc03c8a6eb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.1398074s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=Dev-follnalapp1002-Host-VM, mountpoint=/opt} value=91.60065037040351 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=Dev-follnalapp1002-Host-VM, mountpoint=/opt} value=91.60065037040351 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=Dev-follnalapp1002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/data Value:0xc03c8a6f40} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/data Value:0xc03c8a6f88} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/data Value:0xc03c8a6fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.13982075s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/data} value=91.61361443499871 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/data} value=91.61361443499871 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/export Value:0xc03c8a7068} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/export Value:0xc03c8a70b8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/export Value:0xc03c8a7100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139832371s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/export} value=91.61361443499871 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/export} value=91.61361443499871 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/home Value:0xc03c8a7190} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/home Value:0xc03c8a71e0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/home Value:0xc03c8a7218}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139845451s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/home} value=91.61361443499871 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/home} value=91.61361443499871 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/opt Value:0xc03c8a7318} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/opt Value:0xc03c8a7298} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/opt Value:0xc03c8a72e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139873251s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/opt} value=91.61361443499871 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/opt} value=91.61361443499871 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=Dev-follnalapp1003-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/data Value:0xc03c8a73b0} B:{Var:B 
Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/data Value:0xc03c8a73f0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/data Value:0xc03c8a7438}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139888801s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/data} value=66.18656621998201 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/data} value=66.18656621998201 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/export Value:0xc03c8a7520} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/export Value:0xc03c8a7568} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/export Value:0xc03c8a74d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139903291s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/export} value=66.18656621998201 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/export} value=66.18656621998201 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/home Value:0xc03c8a75f8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/home Value:0xc03c8a7630} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/home Value:0xc03c8a7680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139917561s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/home} value=66.18656621998201 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, 
fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/home} value=66.18656621998201 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/opt Value:0xc03c8a76f0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/opt Value:0xc03c8a7740} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/opt Value:0xc03c8a77a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139931681s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/opt} value=66.18656621998201 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/opt} value=66.18656621998201 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2001.foll.gcp.hclsw.internal, job=Dev-follnalapp2001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/data Value:0xc03c8a78b0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/data Value:0xc03c8a7828} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/data Value:0xc03c8a7868}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139948861s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/data} value=66.73884472644382 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/data} value=66.73884472644382 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/export Value:0xc03c8a7958} 
B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/export Value:0xc03c8a79a0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/export Value:0xc03c8a79f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139962831s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/export} value=66.73884472644382 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/export} value=66.73884472644382 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/home Value:0xc03c8a7ad0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/home Value:0xc03c8a7b10} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/home Value:0xc03c8a7a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139981561s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/home} value=66.73884472644382 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/home} value=66.73884472644382 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/opt Value:0xc03c8a7ba8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/opt Value:0xc03c8a7bf0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/opt Value:0xc03c8a7c28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.139996581s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/opt} value=66.73884472644382 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, 
fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/opt} value=66.73884472644382 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2002.foll.gcp.hclsw.internal, job=Dev-follnalapp2002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/data Value:0xc03c8a7ca8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/data Value:0xc03c8a7cf0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/data Value:0xc03c8a7d38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140012091s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/data} value=66.41343345260324 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/data} value=66.41343345260324 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/export Value:0xc03c8a7db8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/export Value:0xc03c8a7e10} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/export Value:0xc03c8a7e60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140026872s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/export} value=66.41343345260324 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/export} value=66.41343345260324 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/home 
Value:0xc03c8a7ee8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/home Value:0xc03c8a7f20} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/home Value:0xc03c8a7f70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140040253s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/home} value=66.41343345260324 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/home} value=66.41343345260324 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/opt Value:0xc05c5f8118} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/opt Value:0xc05c5f8060} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/opt Value:0xc05c5f80d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140053933s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/opt} value=66.41343345260324 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/opt} value=66.41343345260324 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp2003.foll.gcp.hclsw.internal, job=Dev-follnalapp2003-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/data Value:0xc05c5f8320} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/data Value:0xc05c5f8238} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/data Value:0xc05c5f82c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140067483s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/data} value=95.0602353472744 ], [ var='B' 
labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/data} value=95.0602353472744 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/export Value:0xc05c5f83a8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/export Value:0xc05c5f8400} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/export Value:0xc05c5f8458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140081023s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/export} value=95.0602353472744 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/export} value=95.0602353472744 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/home Value:0xc05c5f84c8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/home Value:0xc05c5f8510} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/home Value:0xc05c5f8560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140096573s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/home} value=95.0602353472744 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/home} value=95.0602353472744 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, 
job=Dev-follnalapp3001-Host-VM, mountpoint=/opt Value:0xc05c5f85e0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/opt Value:0xc05c5f8630} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/opt Value:0xc05c5f8678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140111673s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/opt} value=95.0602353472744 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/opt} value=95.0602353472744 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3001.foll.gcp.hclsw.internal, job=Dev-follnalapp3001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/data Value:0xc05c5f8720} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/data Value:0xc05c5f8760} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/data Value:0xc05c5f8798}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140125733s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/data} value=95.23852046891716 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/data} value=95.23852046891716 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/export Value:0xc05c5f88d8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/export Value:0xc05c5f8840} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/export Value:0xc05c5f8898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140139423s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/export} 
value=95.23852046891716 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/export} value=95.23852046891716 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home Value:0xc05c5f89b8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home Value:0xc05c5f8a00} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home Value:0xc05c5f8968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140156233s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home} value=95.23852046891716 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home} value=95.23852046891716 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/opt Value:0xc05c5f8a90} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/opt Value:0xc05c5f8ae0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/opt Value:0xc05c5f8b30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140173333s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/opt} value=95.23852046891716 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/opt} value=95.23852046891716 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3002.foll.gcp.hclsw.internal, job=Dev-follnalapp3002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data Value:0xc05c5f8bb0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data Value:0xc05c5f8c00} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data Value:0xc05c5f8c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140187783s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data} value=95.41847774441179 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data} value=95.41847774441179 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export Value:0xc05c5f8da8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export Value:0xc05c5f8cf0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export Value:0xc05c5f8d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140201313s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export} value=95.41847774441179 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export} value=95.41847774441179 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/home Value:0xc05c5f8ef0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/home Value:0xc05c5f8e50} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/home Value:0xc05c5f8ea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140216973s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/home} value=95.41847774441179 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/home} value=95.41847774441179 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/opt Value:0xc05c5f8f70} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/opt Value:0xc05c5f8fc0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/opt Value:0xc05c5f9020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140229543s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/opt} value=95.41847774441179 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/opt} value=95.41847774441179 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollnalapp3003.foll.gcp.hclsw.internal, job=Dev-follnalapp3003-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data Value:0xc05c5f90b8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data Value:0xc05c5f90f0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data Value:0xc05c5f9128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140244944s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data} value=92.75028147923173 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data} value=92.75028147923173 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/export Value:0xc05c5f91b8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/export Value:0xc05c5f9208} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/export Value:0xc05c5f9258}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140257345s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/export} value=92.75028147923173 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/export} value=92.75028147923173 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/home Value:0xc05c5f92d0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/home Value:0xc05c5f9330} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/home Value:0xc05c5f9380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140268675s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/home} value=92.75028147923173 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/home} value=92.75028147923173 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/opt Value:0xc05c5f9410} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/opt Value:0xc05c5f9450} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=Dev-follsnaapp1001-Host-VM, mountpoint=/opt Value:0xc05c5f9488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.140282395s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, 
[Alert-evaluation log dump, truncated at both ends and condensed here. It records a single Grafana alert rule evaluation (`EvaluatedAt: 2024-05-29 13:44:10 UTC`, evaluation duration ~5.14 s) of filesystem usage on device `/dev/mapper/vg01-lv01` (`fstype=ext4`). Every listed instance evaluated to `State: Normal` with no error; vars `A` and `B` both carry the measured usage percentage for each mountpoint, and var `C` is `0` (threshold condition not met). The per-host values, identical across each host's mountpoints and rounded to two decimals, are summarized below. All instances share the domain suffix `.foll.gcp.hclsw.internal`.]

| Instance | Job | Mountpoints | Usage % (vars A/B) |
| --- | --- | --- | --- |
| duuscn1afollsnaapp1001 | Dev-follsnaapp1001-Host-VM | /opt (earlier mountpoints truncated) | 92.75 |
| duuscn1afollsnaapp2001 | Dev-follsnaapp2001-Host-VM | /data, /export, /home, /opt | 86.45 |
| duuscn1afollsnaapp3001 | Dev-follsnaapp3001-Host-VM | /data, /export, /home, /opt | 91.27 |
| duuscn1afollspaweb1001 | Dev-follspaweb1001-Host-VM | /data, /export, /home, /opt | 92.72 |
| duuscn1afollspaweb1002 | Dev-follspaweb1002-Host-VM | /data, /export, /home, /opt | 92.75 |
| duuscn1afollspaweb2001 | Dev-follspaweb2001-Host-VM | /data, /export, /home, /opt | 91.55 |
| duuscn1afollspaweb2002 | Dev-follspaweb2002-Host-VM | /data, /export, /home, /opt | 88.96 |
| duuscn1afollspaweb3001 | Dev-follspaweb3001-Host-VM | /data, /export, /home, /opt | 95.05 |
| duuscn1afollspaweb3002 | Dev-follspaweb3002-Host-VM | /data, /export, /home, /opt | 95.05 |
| nuuscn1afollnetutl1001 | Non-prod-follnetutl1001-Host-VM | /data, /export, /home, /opt | 88.49 |
| nuuscn1afollutlbld1001 | Non-prod-follutlbld1001-Host-VM | /apps, /data, /export, /home, /opt | 17.56 |
| nuuscn1afollutlbst1001 | Non-prod-bastion-Host-VM | /apps, /data, /export, /home, /opt | 82.88 |
| nuuscn1afollwxsapp1001 | Non-prod-follwxsapp1001-Host-VM | /data, /export, /home, /opt | 97.02 |
| nuuscn1afollwxsapp1003 | Non-prod-follwxsapp1003-Host-VM | /data (the /export record is truncated mid-line) | 97.04 |
labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/export} value=97.04421146517637 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/export} value=97.04421146517637 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home Value:0xc022d2e5d0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home Value:0xc022d2e540} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home Value:0xc022d2e590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141036551s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home} value=97.04421146517637 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home} value=97.04421146517637 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt Value:0xc022d2e668} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt Value:0xc022d2e6a0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt Value:0xc022d2e6d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141049361s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt} value=97.04421146517637 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt} value=97.04421146517637 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1afollwxsapp1003.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1003-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/data Value:0xc022d2e750} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/data Value:0xc022d2e7a0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/data Value:0xc022d2e7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141061701s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/data} value=95.79181703675837 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/data} value=95.79181703675837 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/export Value:0xc022d2e890} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/export Value:0xc022d2e8d8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/export Value:0xc022d2e930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141075061s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/export} value=95.79181703675837 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/export} value=95.79181703675837 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home Value:0xc022d2e9c0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home Value:0xc022d2ea10} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home Value:0xc022d2ea50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141090092s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home} value=95.79181703675837 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home} value=95.79181703675837 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt Value:0xc022d2eb10} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt Value:0xc022d2eb60} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt Value:0xc022d2eac8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141106062s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt} value=95.79181703675837 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt} value=95.79181703675837 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1bfollwxsapp1002.foll.gcp.hclsw.internal, job=Non-prod-follwxsapp1002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/data Value:0xc022d2ec78} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/data Value:0xc022d2ebf0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/data Value:0xc022d2ec28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141118942s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/data} value=90.26441580255356 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/data} value=90.26441580255356 ], [ var='C' 
labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export Value:0xc022d2edd8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export Value:0xc022d2ee18} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export Value:0xc022d2ed80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141132953s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export} value=90.26441580255356 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export} value=90.26441580255356 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/home Value:0xc022d2ee98} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/home Value:0xc022d2eed0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/home Value:0xc022d2efc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141145583s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/home} value=90.26441580255356 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/home} value=90.26441580255356 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt Value:0xc022d2f080} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt Value:0xc022d2f0c8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt Value:0xc022d2f040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141158713s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt} value=90.26441580255356 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt} value=90.26441580255356 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1cfollnetutl1002.foll.gcp.hclsw.internal, job=Non-prod-follnetutl1002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data Value:0xc022d2f1e0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data Value:0xc022d2f150} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data Value:0xc022d2f1a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141173423s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data} value=92.3613829281433 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data} value=92.3613829281433 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/export Value:0xc022d2f278} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/export Value:0xc022d2f410} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/export Value:0xc022d2f458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141185933s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/export} value=92.3613829281433 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/export} value=92.3613829281433 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home Value:0xc022d2f4e0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home Value:0xc022d2f520} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home Value:0xc022d2f5b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141200373s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home} value=92.3613829281433 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home} value=92.3613829281433 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt Value:0xc022d2f640} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt Value:0xc022d2f688} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt Value:0xc022d2f6c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141214763s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt} value=92.3613829281433 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt} value=92.3613829281433 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=QA-follcmlapp1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/data Value:0xc022d2f748} B:{Var:B 
Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/data Value:0xc022d2f780} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/data Value:0xc022d2f7d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141227863s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/data} value=91.40666883020248 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/data} value=91.40666883020248 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/export Value:0xc022d2f8e0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/export Value:0xc022d2f920} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/export Value:0xc022d2f970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141241373s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/export} value=91.40666883020248 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/export} value=91.40666883020248 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/home Value:0xc022d2fa00} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/home Value:0xc022d2fa50} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/home Value:0xc022d2fa90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141256743s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, 
job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/home} value=91.40666883020248 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/home} value=91.40666883020248 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/opt Value:0xc022d2fc08} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/opt Value:0xc022d2fb88} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/opt Value:0xc022d2fbd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141272113s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/opt} value=91.40666883020248 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/opt} value=91.40666883020248 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollcmlapp1002.foll.gcp.hclsw.internal, job=QA-follcmlapp1002-bastion-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps Value:0xc022d2fcf0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps Value:0xc022d2fd60} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps Value:0xc022d2fdf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141287133s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps} value=55.56603499268423 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps} value=55.56603499268423 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/apps} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/data Value:0xc022d2fee0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/data Value:0xc022d2ff20} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/data Value:0xc022d2fe90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141302854s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/data} value=55.56603499268423 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/data} value=55.56603499268423 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/export Value:0xc0118a8048} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/export Value:0xc022d2ffb0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/export Value:0xc022d2fff8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141318014s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/export} value=55.56603499268423 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/export} value=55.56603499268423 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home Value:0xc0118a80c0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home Value:0xc0118a8100} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home Value:0xc0118a8140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141333064s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, 
fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home} value=55.56603499268423 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home} value=55.56603499268423 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/opt Value:0xc0118a81c8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/opt Value:0xc0118a8200} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/opt Value:0xc0118a8250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141345705s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/opt} value=55.56603499268423 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/opt} value=55.56603499268423 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollintapp1001.foll.gcp.hclsw.internal, job=QA-follintapp1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/data Value:0xc0118a82c0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/data Value:0xc0118a8300} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/data Value:0xc0118a8338}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141358865s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/data} value=94.94099401711927 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/data} value=94.94099401711927 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/export Value:0xc0118a83c8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/export Value:0xc0118a8418} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/export Value:0xc0118a8460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141372995s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/export} value=94.94099401711927 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/export} value=94.94099401711927 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home Value:0xc0118a8528} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home Value:0xc0118a8568} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home Value:0xc0118a84d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141387775s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home} value=94.94099401711927 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home} value=94.94099401711927 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/opt Value:0xc0118a8668} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/opt Value:0xc0118a86b0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/opt Value:0xc0118a8620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141404905s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/opt} value=94.94099401711927 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/opt} value=94.94099401711927 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollmqbapp1001.foll.gcp.hclsw.internal, job=QA-follmqbapp1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/data Value:0xc0118a8758} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/data Value:0xc0118a87b0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/data Value:0xc0118a8820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141420785s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/data} value=34.917998588530644 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/data} value=34.917998588530644 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export Value:0xc0118a88e0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export Value:0xc0118a8940} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export Value:0xc0118a89c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141434175s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export} value=34.917998588530644 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export} value=34.917998588530644 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/home Value:0xc0118a8ac8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/home Value:0xc0118a8b10} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/home Value:0xc0118a8a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141449755s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/home} value=34.917998588530644 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/home} value=34.917998588530644 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/opt Value:0xc0118a8bc8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/opt Value:0xc0118a8c10} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/opt Value:0xc0118a8c70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141463075s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/opt} value=34.917998588530644 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/opt} value=34.917998588530644 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1001.foll.gcp.hclsw.internal, job=QA-follnalapp1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/data Value:0xc0118a8d08} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/data Value:0xc0118a8d50} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/data Value:0xc0118a8dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141478245s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, 
instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/data} value=32.09936304580524 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/data} value=32.09936304580524 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/export Value:0xc0118a8ed8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/export Value:0xc0118a8f40} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/export Value:0xc0118a8e88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141491245s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/export} value=32.09936304580524 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/export} value=32.09936304580524 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/home Value:0xc0118a8ff0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/home Value:0xc0118a9040} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/home Value:0xc0118a9080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141506216s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/home} value=32.09936304580524 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/home} value=32.09936304580524 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/opt Value:0xc0118a90f8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/opt Value:0xc0118a9140} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/opt Value:0xc0118a9178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141520106s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/opt} value=32.09936304580524 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/opt} value=32.09936304580524 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1002.foll.gcp.hclsw.internal, job=QA-follnalapp1002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/data Value:0xc0118a91f8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/data Value:0xc0118a9230} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/data Value:0xc0118a9270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141533456s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/data} value=40.97370960978633 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/data} value=40.97370960978633 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/export Value:0xc0118a9310} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/export Value:0xc0118a9360} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/export Value:0xc0118a93c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141548226s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/export} value=40.97370960978633 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/export} value=40.97370960978633 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/home Value:0xc0118a9550} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/home Value:0xc0118a94a0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/home Value:0xc0118a9518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141561586s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/home} value=40.97370960978633 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/home} value=40.97370960978633 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/opt Value:0xc0118a95c8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/opt Value:0xc0118a9618} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/opt Value:0xc0118a9650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141575107s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/opt} value=40.97370960978633 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/opt} value=40.97370960978633 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollnalapp1003.foll.gcp.hclsw.internal, job=QA-follnalapp1003-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data Value:0xc0118a96e0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data Value:0xc0118a9720} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data Value:0xc0118a97b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141588327s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data} value=90.17089256454378 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data} value=90.17089256454378 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/export Value:0xc0118a9920} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/export Value:0xc0118a9888} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/export Value:0xc0118a98c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141628957s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/export} value=90.17089256454378 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/export} value=90.17089256454378 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home Value:0xc0118a99a8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home Value:0xc0118a99e0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home Value:0xc0118a9a18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141642697s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home} value=90.17089256454378 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home} value=90.17089256454378 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/opt Value:0xc0118a9b30} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/opt Value:0xc0118a9aa0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/opt Value:0xc0118a9ae0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141656127s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/opt} value=90.17089256454378 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/opt} value=90.17089256454378 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollsnaapp1001.foll.gcp.hclsw.internal, job=QA-follsnaapp1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/data Value:0xc0118a9bb0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/data Value:0xc0118a9bf0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/data Value:0xc0118a9c40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141668267s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/data} value=89.61201119415405 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/data} value=89.61201119415405 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/export Value:0xc0118a9cc8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/export Value:0xc0118a9d10} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/export Value:0xc0118a9d50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141679027s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/export} value=89.61201119415405 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/export} value=89.61201119415405 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/home Value:0xc0118a9dd8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/home Value:0xc0118a9e10} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/home Value:0xc0118a9e60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141689987s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/home} value=89.61201119415405 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/home} value=89.61201119415405 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/opt Value:0xc0118a9f10} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/opt Value:0xc0118a9f58} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/opt Value:0xc0118a9ed8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141700697s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/opt} value=89.61201119415405 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/opt} value=89.61201119415405 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1001.foll.gcp.hclsw.internal, job=QA-follspaweb1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/data Value:0xc042894010} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/data Value:0xc042894060} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/data Value:0xc0118a9fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141712347s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/data} value=89.9769188199318 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/data} value=89.9769188199318 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export Value:0xc042894148} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export Value:0xc042894190} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export Value:0xc0428941d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141722948s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export} value=89.9769188199318 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export} value=89.9769188199318 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home Value:0xc0428942a8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home Value:0xc0428942f8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home Value:0xc042894270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141733358s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home} value=89.9769188199318 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home} value=89.9769188199318 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt Value:0xc042894380} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt Value:0xc0428943b8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt Value:0xc0428943f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141743818s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt} value=89.9769188199318 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt} value=89.9769188199318 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollspaweb1002.foll.gcp.hclsw.internal, job=QA-follspaweb1002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data Value:0xc0428944c8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data Value:0xc042894518} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data Value:0xc042894478}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141755118s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data} value=96.42506142410797 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data} value=96.42506142410797 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export Value:0xc042894668} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export Value:0xc0428945c8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export Value:0xc042894620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141769278s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export} value=96.42506142410797 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export} value=96.42506142410797 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home Value:0xc042894738} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home Value:0xc042894778} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home Value:0xc042894700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141785299s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home} value=96.42506142410797 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home} value=96.42506142410797 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/opt Value:0xc042894888} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/opt Value:0xc0428947f8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/opt Value:0xc042894840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141831309s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/opt} value=96.42506142410797 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/opt} value=96.42506142410797 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1001.foll.gcp.hclsw.internal, job=QA-follwxsapp1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data Value:0xc042894900} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data Value:0xc042894940} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data Value:0xc042894978}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141846669s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data} value=97.08018464524973 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data} value=97.08018464524973 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/export Value:0xc042894a70} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/export Value:0xc042894af0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/export Value:0xc042894a08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141859359s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/export} value=97.08018464524973 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/export} value=97.08018464524973 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home Value:0xc042894c10} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home Value:0xc042894b70} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home Value:0xc042894bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141872379s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home} value=97.08018464524973 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home} value=97.08018464524973 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt Value:0xc042894d18} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt Value:0xc042894ca0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt Value:0xc042894ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141905629s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt} value=97.08018464524973 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt} value=97.08018464524973 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=quuscn1afollwxsapp1002.foll.gcp.hclsw.internal, job=QA-follwxsapp1002-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/data Value:0xc042894e20} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/data Value:0xc042894da8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/data Value:0xc042894de0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141923399s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/data} value=97.1268693919994 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/data} value=97.1268693919994 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/export Value:0xc042894f00} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/export Value:0xc042894f50} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/export Value:0xc042894eb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.14193792s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/export} value=97.1268693919994 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/export} value=97.1268693919994 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/export} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/home State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/home Value:0xc042894ff8} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/home Value:0xc0428950c8} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/home Value:0xc042895110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.14195208s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/home} value=97.1268693919994 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/home} value=97.1268693919994 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/home} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/opt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/opt Value:0xc0428951d0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/opt Value:0xc042895220} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/opt Value:0xc042895180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.142414244s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/opt} value=97.1268693919994 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/opt} value=97.1268693919994 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollcmlapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follcmlapp1001-Host-VM, mountpoint=/opt} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/apps State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/apps Value:0xc0428952a0} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/apps Value:0xc0428952f0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/apps Value:0xc042895330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.142433434s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/apps} value=90.40074550777231 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/apps} value=90.40074550777231 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/apps} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data Value:0xc042895450} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data Value:0xc0428953d0} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data Value:0xc042895410}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.142447635s EvaluationString:[ var='A' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data} value=90.40074550777231 ], [ var='B' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data} value=90.40074550777231 ], [ var='C' labels={device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/data} value=0 ]} {Instance:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/export Value:0xc042895590} B:{Var:B Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/export Value:0xc042895500} C:{Var:C Labels:device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1afollintapp1001.foll.gcp.hclsw.internal, job=Pre-Prod-Follintapp1001-Host-VM, mountpoint=/export Value:0xc042895540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuratio
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.181427877Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=integration"
+level=debug ts=2024-05-29T13:44:15.1812489Z caller=remote_instance_store.go:51 user=359284 slug=ankorstore msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.18136794Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.181328329Z caller=remote_instance_store.go:51 user=723897 slug=inthepocket msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=723897 slug=inthepocket t=2024-05-29T13:44:15.181264253Z level=debug msg="Saving alert states" count=110 max_state_save_concurrency=1
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=integration, instance=localhost:9100, instance_type=hedera-node, inventory_name=node00 - 35.196.146.2, job=node-exporter, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:15.181310289Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=https://plantuml.inthepocket.org, job=PlantUML" t=2024-05-29T13:44:15.181231045Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=vmbl, env=uat, host=vmblUATrpssl1, installation=vmbluatv9, instance=vmblUATrpssl1:1936, job=HAProxy, master=clientuatapp3.croesus.com, origin_prometheus=vmblUATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.181254007Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=vmbl, env=uat, host=vmblUATrpssl1, installation=vmbluatv9, instance=vmblUATrpssl1:1936, job=HAProxy, master=clientuatapp3.croesus.com, origin_prometheus=vmblUATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.18124Z level=debug msg="Setting next state" handler=resultAlerting
+level=debug ts=2024-05-29T13:44:15.181186472Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tll, env=prod, host=tllrpssl2, installation=tllprod, instance=tllrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=tllrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.18117175Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node31 - 35.219.184.202, job=node-exporter, metrics_node_id=32, node_id=31" t=2024-05-29T13:44:15.181169561Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.181152528Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.18.90.12, job=switch_antwerpen" t=2024-05-29T13:44:15.181093439Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.18.90.12, job=switch_antwerpen" t=2024-05-29T13:44:15.181071461Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node29 - 35.212.39.15, job=node-exporter, metrics_node_id=30, node_id=29" t=2024-05-29T13:44:15.180898813Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.18.90.11, job=ap_entry" t=2024-05-29T13:44:15.180902166Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=449554 slug=metricgamingppe t=2024-05-29T13:44:15.180920181Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Kafka cluster {{ $labels.Cluster Name }} disk usage over 75%': error parsing template __alert_Kafka cluster disk usage over 75%: template: __alert_Kafka cluster disk usage over 75%!:(string=Name)1: function %!q(MISSING) not defined"
+level=debug ts=2024-05-29T13:44:15.180861115Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tdpic, env=uat, host=tdpicPATrpssl1, installation=tdpicpat, instance=tdpicPATrpssl1:1936, job=HAProxy, master=tddiuatapp2.croesus.com, origin_prometheus=tdpicPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.180878744Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.180883329Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+logger=ngalert.state.manager.persist user=21051 slug=mojio t=2024-05-29T13:44:15.180823026Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.18.90.10, job=ap_meeting_rooms" t=2024-05-29T13:44:15.180773874Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=449554 slug=metricgamingppe instance="Cluster Name=bragg-nl" t=2024-05-29T13:44:15.180806685Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.18.90.10, job=ap_meeting_rooms" t=2024-05-29T13:44:15.180760232Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=449554 slug=metricgamingppe instance="Cluster Name=bragg-nl" t=2024-05-29T13:44:15.180794652Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tdpic, env=uat, host=tdpicPATproxyfwlb1, installation=tdpicpat, instance=tdpicPATproxyfwlb1:1936, job=HAProxy, master=tdpicuatapp2.croesus.com, origin_prometheus=tdpicPATproxyfwlb1, site=viger, type=proxy" t=2024-05-29T13:44:15.180715091Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=449554 slug=metricgamingppe instance="Cluster Name=betzone-uk" t=2024-05-29T13:44:15.180700917Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=449554 slug=metricgamingppe t=2024-05-29T13:44:15.180672506Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Kafka cluster {{ $labels.Cluster Name }} disk usage over 75%': error parsing template __alert_Kafka cluster disk usage over 75%: template: __alert_Kafka cluster disk usage over 75%!:(string=Name)1: function %!q(MISSING) not defined"
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node27 - 35.212.125.100, job=node-exporter, metrics_node_id=28, node_id=27" t=2024-05-29T13:44:15.180700747Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.180608819Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tdmf, env=uat, host=tdmfPATrpssl2, installation=tdmfPAT, instance=tdmfPATrpssl2:1936, job=HAProxy, master=tduatapp3.croesus.com, origin_prometheus=tdmfPATrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.180583276Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.180520918Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tdmf, env=uat, host=tdmfPATrpssl1, installation=tdmfPAT, instance=tdmfPATrpssl1:1936, job=HAProxy, master=tdpicuatapp1.croesus.com, origin_prometheus=tdmfPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.180518527Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.180430473Z caller=remote_instance_store.go:51 user=672418 slug=streamkap msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.180411543Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=815794 slug=fymtech t=2024-05-29T13:44:15.180339298Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.754632ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Luxembourg, country=Luxembourg, datacenter=Altushost, environment=production, instance=37.46.113.191:9998, ip=37.46.113.191, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=luxembourg410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.180442313Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tdmf, env=prod, host=tdmfrpssl2, installation=tdmfprod, instance=tdmfrpssl2:1936, job=HAProxy, master=tdpicapp2.croesus.com, origin_prometheus=tdmfrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.18042997Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node25 - 35.213.131.196, job=node-exporter, metrics_node_id=26, node_id=25" t=2024-05-29T13:44:15.18035571Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=449554 slug=metricgamingppe t=2024-05-29T13:44:15.180314217Z level=debug msg="State manager processing evaluation results" resultCount=5
+level=debug ts=2024-05-29T13:44:15.180340791Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.90.63, job=ap_entrance" t=2024-05-29T13:44:15.180341005Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.180328703Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+logger=ngalert.state.manager.persist user=397650 slug=gofirefly t=2024-05-29T13:44:15.180242566Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=44.400503ms
+level=debug ts=2024-05-29T13:44:15.18026405Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=alwaysgeeky-justice-dev, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistent01671" t=2024-05-29T13:44:15.180234353Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.180229854Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:15.180129925Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tddi, env=uat, host=tddiservicePATrpssl1, installation=tddiPAT, instance=tddiservicePATrpssl1:1936, job=HAProxy, master=tddiuatapp4.croesus.com, origin_prometheus=tddiservicePATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.180177219Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.180107941Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=aexlab-justice-prod, game_interval_time=12, game_namespace=accelbytetesting, game_template=UETestPersistentA9D7C" t=2024-05-29T13:44:15.179946259Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tddi, env=uat, host=tddicroesusPATrpssl1, installation=tddiPAT, instance=tddicroesusPATrpssl1:1936, job=HAProxy, master=tduatapp3.croesus.com, origin_prometheus=tddicroesusPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.180031746Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.90.61, job=ap_team1" t=2024-05-29T13:44:15.18001939Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=tddi, env=uat, host=tddicroesusPATrpssl1, installation=tddiPAT, instance=tddicroesusPATrpssl1:1936, job=HAProxy, master=tduatapp3.croesus.com, origin_prometheus=tddicroesusPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.180018754Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.90.61, job=ap_team1" t=2024-05-29T13:44:15.180002352Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=21051 slug=mojio instance="datasource_uid=grafanacloud-mojio, ref_id=C" t=2024-05-29T13:44:15.179951981Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node22 - 34.0.1.175, job=node-exporter, metrics_node_id=23, node_id=22" t=2024-05-29T13:44:15.17995417Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node22 - 34.0.1.175, job=node-exporter, metrics_node_id=23, node_id=22" t=2024-05-29T13:44:15.179941835Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.179837263Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.128.216, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:15.179702159Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=637258 slug=testb9lab t=2024-05-29T13:44:15.179733828Z level=debug msg="Saving alert states" count=20 max_state_save_concurrency=1
+logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tutorials.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.179591631Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.90.50, job=nvr_leuven" t=2024-05-29T13:44:15.179560577Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=td, env=uat, host=tdPATrpssl2, installation=tdPAT, instance=tdPATrpssl2:1936, job=HAProxy, master=tddiuatapp3.croesus.com, origin_prometheus=tdPATrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.179588691Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tutorials.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.179573978Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Luxembourg, country=Luxembourg, datacenter=Altushost, environment=production, instance=37.46.113.153:9998, ip=37.46.113.153, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/lu.crt, role=vpn, server=luxembourg412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.179551708Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=td, env=uat, host=tdPATrpssl1, installation=tdPAT, instance=tdPATrpssl1:1936, job=HAProxy, master=tddiuatapp4.croesus.com, origin_prometheus=tdPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.179512676Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.90.5, job=switch_leuven" t=2024-05-29T13:44:15.179414099Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=td, env=uat, host=tdPATproxyfwlb2, installation=tdPAT, instance=tdPATproxyfwlb2:1936, job=HAProxy, master=tdpicuatapp1.croesus.com, origin_prometheus=tdPATproxyfwlb2, site=viger, type=proxy" t=2024-05-29T13:44:15.179403717Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tezos.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.179387476Z level=debug msg="Setting next state" handler=resultAlerting
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Louisville, country=United States, datacenter=DataPacket, environment=production, instance=84.239.6.129:9998, ip=84.239.6.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-kentucky-pf.crt, role=vpn, server=kentucky402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.179350627Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Louisville, country=United States, datacenter=DataPacket, environment=production, instance=84.239.6.129:9998, ip=84.239.6.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-kentucky-pf.crt, role=vpn, server=kentucky402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.179339996Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=916145 slug=cmselfpd t=2024-05-29T13:44:15.179329555Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.787611ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.179304389Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=td, env=uat, host=tdPATproxyfwlb1, installation=tdPAT, instance=tdPATproxyfwlb1:1936, job=HAProxy, master=tddiuatapp1.croesus.com, origin_prometheus=tdPATproxyfwlb1, site=viger, type=proxy" t=2024-05-29T13:44:15.179327953Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:15.179195164Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.390618ms
+logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.179203874Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.179177998Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=td, env=prod, host=tdrpssl1, installation=tdprod, instance=tdrpssl1:1936, job=HAProxy, master=tdapp8.croesus.com, origin_prometheus=tdrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.179178489Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.historian backend=loki user=508403 slug=zyax t=2024-05-29T13:44:15.179121659Z level=debug msg="Done saving alert state history batch"
+level=info ts=2024-05-29T13:44:15.178931187Z caller=remote_alert_sender.go:94 user=819809 slug=sprinter host=sprinter-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.11.83.54:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cc196225-99d3-44da-a6ba-a57fd322d991 alerts=1
+logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.179103917Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.70.76, job=ZoomRoom Longhorn" t=2024-05-29T13:44:15.179067554Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=245291 slug=pismo version=35 fingerprint=57b127da8b5ae797 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.17903824Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.178841256s EvaluationString:}]" duration=186.489976ms
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.178957603Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.178877759Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.9:9998, ip=191.96.106.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.178945529Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.178886112Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node15 - 35.213.254.208, job=node-exporter, metrics_node_id=16, node_id=15" t=2024-05-29T13:44:15.178878493Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=cbcf7ea1ad33ef4b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.178815883Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.178512582s EvaluationString:}]" duration=236.147544ms +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://ida.interchain.io, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.178849068Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=819809 slug=sprinter t=2024-05-29T13:44:15.178712335Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.902682ms +logger=ngalert.state.manager.persist user=602335 slug=gcbgrupo t=2024-05-29T13:44:15.178670048Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.31541ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.9:9998, ip=191.96.106.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.178734919Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.9:9998, ip=191.96.106.9, is_collocated=true, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.17871659Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.178700269Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-stage, game_interval_time=12, game_namespace=test1, game_template=solotom" t=2024-05-29T13:44:15.178649497Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.50.61, job=ZoomRoom Memphis" t=2024-05-29T13:44:15.178628851Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=174016 slug=journalstaging t=2024-05-29T13:44:15.178491374Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.178621687Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.178599998Z level=debug msg="Setting next state" handler=resultAlerting +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.178554928Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet2" +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://docs.topos.technology, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.17849215Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=rjames, env=uat, host=rjamesPATrpssl1, installation=rjamesPAT, instance=rjamesPATrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=rjamesPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.178499032Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.8:9998, ip=191.96.106.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.178356562Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=rjames, env=uat, host=rjamesPATrpssl1, installation=rjamesPAT, instance=rjamesPATrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=rjamesPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.178477536Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=174016 slug=journalstaging version=1 fingerprint=1b3af8d7edc073e0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.178300033Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bYQmLgyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.177901881s EvaluationString:}]" duration=14.005738ms +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.178325427Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-stage, game_interval_time=12, game_namespace=samuel, game_template=onboarding" t=2024-05-29T13:44:15.178238833Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.40.20, job=paxton_leuven_keuken" t=2024-05-29T13:44:15.17827179Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://certificates.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.178193053Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=rgmp, env=prod, host=rgmprpssl1, installation=rgmpprod, instance=rgmprpssl1:1936, job=HAProxy, master=clientapp3.croesus.com, origin_prometheus=rgmprpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.178201381Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.8:9998, ip=191.96.106.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.178155209Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.8:9998, ip=191.96.106.8, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles437, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.17814289Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.178105334Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.17.40.13, job=paxton_leuven_server" t=2024-05-29T13:44:15.178105913Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node10 - 35.213.99.36, job=node-exporter, metrics_node_id=11, node_id=10" t=2024-05-29T13:44:15.178056473Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=omg, env=prod, host=omgawsrpssl1, installation=omgaws, instance=omgawsrpssl1:1936, job=HAProxy, master=omgawsproxymaitre, origin_prometheus=omgawsrpssl1, site=aws, type=proxy" t=2024-05-29T13:44:15.178035228Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.178044692Z level=debug msg="Setting next state" handler=resultAlerting +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.178023707Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet2" +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=omg, env=prod, host=omgawsrpssl1, installation=omgaws, instance=omgawsrpssl1:1936, job=HAProxy, master=omgawsproxymaitre, origin_prometheus=omgawsrpssl1, site=aws, type=proxy" t=2024-05-29T13:44:15.178022681Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.177962762Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=nbin, env=nonprod, host=nbinuatawsrpssl1, installation=nbinuataws, instance=nbinuatawsrpssl1:1936, job=HAProxy, master=nbinuatawsproxymaitre, origin_prometheus=nbinuatawsrpssl1, site=aws, type=proxy" t=2024-05-29T13:44:15.177943427Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-stage, game_interval_time=12, game_namespace=fardo, game_template=test" t=2024-05-29T13:44:15.177855144Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-stage, game_interval_time=12, game_namespace=fardo, game_template=test" t=2024-05-29T13:44:15.177837214Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.177794247Z caller=remote_instance_store.go:51 user=700399 slug=demo19344 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=159532 slug=getfabric t=2024-05-29T13:44:15.177796858Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Stock Snapshots Monitor" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.7:9998, ip=191.96.106.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.177755719Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=ipcsec, env=uat, host=ipcsecuatawsrpssl1, installation=ipcsecuataws, instance=ipcsecuatawsrpssl1:1936, job=HAProxy, master=ipcsecuatawsproxymaitre, origin_prometheus=ipcsecuatawsrpssl1, site=aws, type=proxy" t=2024-05-29T13:44:15.177711528Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=159532 slug=getfabric t=2024-05-29T13:44:15.177652342Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Stock Snapshots Monitor" +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.interchain.io/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.17762502Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node07 - 35.214.36.2, job=node-exporter, metrics_node_id=8, node_id=7" t=2024-05-29T13:44:15.177563897Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-stage, game_interval_time=12, game_namespace=fardo, game_template=onboarding" t=2024-05-29T13:44:15.177537248Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=159532 slug=getfabric instance="mfc=DAL01" t=2024-05-29T13:44:15.177540318Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=159532 slug=getfabric instance="mfc=DAL01" t=2024-05-29T13:44:15.177525252Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.68, job=ap_back" t=2024-05-29T13:44:15.177524024Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.177487306Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=ipcsec, env=uat, host=ipcsecPPrpssl1, installation=ipcsecPP, instance=ipcsecPPrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=ipcsecPPrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.17752269Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.68, job=ap_back" t=2024-05-29T13:44:15.177511231Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl2, installation=ipcsecprod, instance=ipcsecrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=ipcsecrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.177453227Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=159532 slug=getfabric t=2024-05-29T13:44:15.177467599Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Stock Snapshots Monitor" +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node06 - 35.210.253.145, job=node-exporter, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:15.177372507Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.17734929Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.67, job=ap_center" t=2024-05-29T13:44:15.17739327Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node06 - 35.210.253.145, job=node-exporter, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:15.177362669Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.177356773Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.177342755Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl1, installation=ipcsecprod, instance=ipcsecrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=ipcsecrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.177360622Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=159532 slug=getfabric instance="mfc=BS01" t=2024-05-29T13:44:15.177343026Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=159532 slug=getfabric t=2024-05-29T13:44:15.177298928Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Stock Snapshots Monitor" +level=debug ts=2024-05-29T13:44:15.177285841Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.177303847Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=637258 slug=testb9lab instance="metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target" t=2024-05-29T13:44:15.177233826Z level=debug msg="Setting next state" handler=resultAlerting +level=debug ts=2024-05-29T13:44:15.177272221Z caller=remote_image_capturer.go:33 user=637258 slug=testb9lab rule_org_id=1 rule_uid=b5c799f9-892e-4811-8876-139e86572596 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" +logger=ngalert.state.manager user=637258 slug=testb9lab t=2024-05-29T13:44:15.177068034Z level=debug msg="State manager processing evaluation results" resultCount=20 +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=iavm, env=uat, host=iavmPATrpssl2, installation=iavmPAT, instance=iavmPATrpssl2:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=iavmPATrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.177219632Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=iavm, env=uat, host=iavmPATrpssl2, installation=iavmPAT, instance=iavmPATrpssl2:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=iavmPATrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.177207221Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.177155336Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.638853ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.5:9998, ip=191.96.106.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.177153336Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 35.215.8.212, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:15.177142251Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=637258 slug=testb9lab version=77 fingerprint=b266e5f4dfcf94d1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.176545532Z level=debug msg="Alert rule evaluated" results="[{Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, 
resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042984368} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0429843d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174370687s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=11 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=1 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0429844d8} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, 
resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0429844b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174396263s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=66 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.interchain.io/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.interchain.io/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042984558} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.interchain.io/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042984a38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174407867s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.interchain.io/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=78 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://academy.interchain.io/, 
resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042984b18} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042984ab8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174418903s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=72 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042984b68} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042984bc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174426825s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=46 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://api.support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://askgem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://askgem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042985108} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://askgem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, 
resource.type=prometheus_target Value:0xc042984c38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174433111s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://askgem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=57 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://askgem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042985188} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc042985208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174440991s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=11 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, 
resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=1 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://certificates.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://certificates.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466880b8} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://certificates.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688058}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174449335s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://certificates.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=53 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://certificates.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://developers.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://developers.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, 
resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688118} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://developers.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174455663s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://developers.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=66 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://developers.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://docs.topos.technology, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://docs.topos.technology, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688218} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://docs.topos.technology, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466881f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174462621s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, 
metric.label.target=https://docs.topos.technology, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=68 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://docs.topos.technology, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688308} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466882b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174470218s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=1 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.academy.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=1 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, 
metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688398} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466883e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174475659s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=38 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://git.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://ida.interchain.io, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://ida.interchain.io, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688488} C:{Var:C 
Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://ida.interchain.io, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174483241s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://ida.interchain.io, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=57 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://ida.interchain.io, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://interchainacademy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://interchainacademy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688788} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://interchainacademy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466887c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174490087s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://interchainacademy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, 
resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=66 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://interchainacademy.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://necaxanft.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://necaxanft.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688858} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://necaxanft.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466888a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174500794s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://necaxanft.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=11 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://necaxanft.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=1 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, 
resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466889a8} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc0466889b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174508542s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=70 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem-staging.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688a88} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, 
resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688ad8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174515436s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=42 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://support-gem.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tezos.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tezos.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688b88} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tezos.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688bc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174521501s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tezos.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=11 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, 
metric.label.target=https://tezos.b9lab.com, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=1 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tutorials.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tutorials.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688d78} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tutorials.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688d58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174529999s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tutorials.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=63 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tutorials.cosmos.network/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ]} {Instance:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tzmint.b9lab.com/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target State:Alerting Error: Results:map[] Values:map[B:{Var:B 
Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tzmint.b9lab.com/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688e18} C:{Var:C Labels:metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tzmint.b9lab.com/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target Value:0xc046688e58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174537357s EvaluationString:[ var='B' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tzmint.b9lab.com/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=0 ], [ var='C' labels={metric.label.pod=infra-check-tools-5c95d87cc5-9k97n, metric.label.scan_result_type=UrlCertValidityDays, metric.label.target=https://tzmint.b9lab.com/, resource.label.cluster=infra-monitoring-cluster, resource.label.instance=infra-check-tools-5c95d87cc5-9k97n:8080, resource.label.job=infra-check-tools, resource.label.location=europe-west3, resource.label.namespace=default, resource.label.project_id=infra-monitoring-394811, resource.type=prometheus_target} value=1 ]}]" duration=77.933828ms +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=iavm, env=uat, host=iavmPATrpssl1, installation=iavmPAT, instance=iavmPATrpssl1:1936, job=HAProxy, master=iavmuatapp1.croesus.com, origin_prometheus=iavmPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.177143246Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=159532 slug=getfabric version=18 fingerprint=0608285cd97d665c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.176970586Z level=debug msg="Alert rule evaluated" results="[{Instance:mfc=BS01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:mfc=BS01 Value:0xc01b886b90} B:{Var:B Labels:mfc=BS01 Value:0xc01b886ba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.176540102s EvaluationString:[ var='A' labels={mfc=BS01} value=0 ], [ var='B' labels={mfc=BS01} value=0 ]} {Instance:mfc=DAL01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:mfc=DAL01 Value:0xc01b8871b0} B:{Var:B Labels:mfc=DAL01 Value:0xc01b8871c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.176550398s EvaluationString:[ var='A' labels={mfc=DAL01} value=0 ], [ var='B' labels={mfc=DAL01} value=0 ]} {Instance:mfc=EMK01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:mfc=EMK01 Value:0xc01b887630} B:{Var:B Labels:mfc=EMK01 Value:0xc01b8871e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.176555167s EvaluationString:[ var='A' 
labels={mfc=EMK01} value=0 ], [ var='B' labels={mfc=EMK01} value=0 ]} {Instance:mfc=HLN01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:mfc=HLN01 Value:0xc01b887660} B:{Var:B Labels:mfc=HLN01 Value:0xc01b887670}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.176560433s EvaluationString:[ var='A' labels={mfc=HLN01} value=0 ], [ var='B' labels={mfc=HLN01} value=0 ]}]" duration=28.403249ms +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.177109187Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=harbour, env=uat, host=harbouruatawsrpssl1, installation=harbouruataws, instance=harbouruatawsrpssl1:1936, job=HAProxy, master=harbouruatawsproxymaitre, origin_prometheus=harbouruatawsrpssl1, site=aws, type=proxy" t=2024-05-29T13:44:15.177064345Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=harbour, env=uat, host=harbouruatawsrpssl1, installation=harbouruataws, instance=harbouruatawsrpssl1:1936, job=HAProxy, master=harbouruatawsproxymaitre, origin_prometheus=harbouruatawsrpssl1, site=aws, type=proxy" t=2024-05-29T13:44:15.177051006Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.64, job=ap_grasvlakte" t=2024-05-29T13:44:15.176982468Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=gpd, env=uat, host=gpdPPrpssl1, installation=gpdPP, instance=gpdPPrpssl1:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPPrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.176971785Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.176922716Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.176958752Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet2" +logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdeyrm9s020owb, ref_id=A" t=2024-05-29T13:44:15.176906049Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.5:9998, ip=191.96.106.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.176920139Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=gpd, env=uat, host=gpdPATproxy2, installation=gpdPAT, instance=gpdPATproxy2:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPATproxy2, site=viger, type=proxy" t=2024-05-29T13:44:15.176895154Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.176890299Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.scheduler user=206107 slug=hydrolix version=3 fingerprint=8819a32545688e29 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.176791486Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdeyrm9s020owb, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.176524217s EvaluationString:}]" duration=140.71808ms +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node03 - 35.212.250.9, job=node-exporter, metrics_node_id=4, node_id=3" t=2024-05-29T13:44:15.176820531Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.63, job=ap_dory" t=2024-05-29T13:44:15.176795962Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.63, job=ap_dory" t=2024-05-29T13:44:15.176782061Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.176791578Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet2" + level=debug ts=2024-05-29T13:44:15.176677883Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.176763893Z caller=remote_instance_store.go:51 user=654076 slug=peerbr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=gpd, env=uat, host=gpdPATproxy1, installation=gpdPAT, instance=gpdPATproxy1:1936, job=HAProxy, master=vmduatapp1.croesus.com, origin_prometheus=gpdPATproxy1, site=viger, type=proxy" t=2024-05-29T13:44:15.176790399Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.4:9998, ip=191.96.106.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.176749469Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.176735789Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.176706853Z caller=remote_instance_store.go:57 user=654076 slug=peerbr msg="calling DeleteAlertInstances - not implemented" + level=debug ts=2024-05-29T13:44:15.176679609Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.62, job=ap_arena" t=2024-05-29T13:44:15.176663656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=654076 slug=peerbr t=2024-05-29T13:44:15.176684391Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=654076 slug=peerbr t=2024-05-29T13:44:15.176678182Z level=info msg="Detected stale state entry" cacheID="[[\"Series\",\"query82110145c5da4e4a9d4ed441cc7ec996\"],[\"__alert_rule_namespace_uid__\",\"f22c9f5f-d7f8-4a60-b998-58c1d02eec58\"],[\"__alert_rule_uid__\",\"b564e857-852c-4e4b-9ce5-fd39880e6548\"],[\"alertname\",\" AdiantePortalV2Down\"],[\"bu\",\"adiante\"],[\"grafana_folder\",\"Adiante\"],[\"severity\",\"critical\"],[\"squad\",\"retencao\"]]" state=Normal reason= + logger=ngalert.state.manager user=654076 slug=peerbr instance="Series=queryae1ac7412cb24e15a52a1c6e3947bcc9" t=2024-05-29T13:44:15.176666292Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=654076 slug=peerbr instance="Series=queryae1ac7412cb24e15a52a1c6e3947bcc9" t=2024-05-29T13:44:15.176655262Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=gpd, env=prod, host=gpdrpssl1, installation=gpdprod, instance=gpdrpssl1:1936, job=HAProxy, master=vmdapp2.croesus.com, origin_prometheus=gpdrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.176622473Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.176505716Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=171897 slug=croesus 
instance="__name__=up, client=fbn, env=uat, host=fbnPATrpssl2, installation=fbnPAT, instance=fbnPATrpssl2:1936, job=HAProxy, master=fbnuatapp4.croesus.com, origin_prometheus=fbnPATrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.176555459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node01 - 35.212.75.128, job=node-exporter, metrics_node_id=2, node_id=1" t=2024-05-29T13:44:15.176501336Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.176438776Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=fbn, env=uat, host=fbnPATrpssl1, installation=fbnPAT, instance=fbnPATrpssl1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=fbnPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.17647358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=fbn, env=uat, host=fbnPATrpssl1, installation=fbnPAT, instance=fbnPATrpssl1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=fbnPATrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.176463057Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.176323706Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node00 - 35.211.224.92, job=node-exporter, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:15.176351189Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=fbn, env=prod, host=fbnrpssl2, installation=fbnprod, instance=fbnrpssl2:1936, job=HAProxy, master=fbnapp2.croesus.com, origin_prometheus=fbnrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.176399555Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.176370929Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=fbn, env=prod, host=fbnrpssl1, installation=fbnprod, instance=fbnrpssl1:1936, job=HAProxy, master=fbnloader1.croesus.com, origin_prometheus=fbnrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.17631511Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.176209465Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=508403 slug=zyax t=2024-05-29T13:44:15.176206936Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.928506ms + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.59, job=ap_reception" t=2024-05-29T13:44:15.17624282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=croesus, env=test, 
host=passerelledemoawsproxy1, installation=passerelledemoaws, instance=passerelledemoawsproxy1:1936, job=HAProxy, master=unknown, origin_prometheus=passerelledemoawsproxy1, site=aws, type=proxy" t=2024-05-29T13:44:15.176236967Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node06 - 35.208.138.144, job=node-exporter, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:15.176222793Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:15.17622244Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:15.176100099Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.58, job=ap_gent_keuken" t=2024-05-29T13:44:15.176095818Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.176001237Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.176035711Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet1" + level=debug ts=2024-05-29T13:44:15.175980008Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl2, installation=cibcisipat, instance=cibcisipatrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl2, site=viger, type=proxy" t=2024-05-29T13:44:15.175974156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.175877198Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet1" + logger=ngalert.state.manager.persist user=615392 slug=shinemetrics t=2024-05-29T13:44:15.175803514Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=191.96.106.2:9998, ip=191.96.106.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.175870096Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.location=europe-west1, resource.label.project_id=shine-163816, resource.label.queue_id=companies-monitoring-queue, resource.label.target_type=app_engine, resource.type=cloud_tasks_queue" t=2024-05-29T13:44:15.175758582Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node03 - 35.212.205.112, job=node-exporter, metrics_node_id=4, node_id=3" t=2024-05-29T13:44:15.17573103Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.17566414Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=testingccu, game_template=local-testing-ccu" t=2024-05-29T13:44:15.175651817Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.historian backend=loki user=893154 slug=cmselfnp t=2024-05-29T13:44:15.175633013Z level=debug msg="Done saving alert state history batch" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.175650812Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl1, installation=cibcisiprod, instance=cibcisiprodrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.175633812Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.175610435Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node02 - 35.212.80.189, job=node-exporter, metrics_node_id=3, node_id=2" t=2024-05-29T13:44:15.175548875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_vm_max_map_count, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node02 - 35.212.80.189, job=node-exporter, metrics_node_id=3, node_id=2" t=2024-05-29T13:44:15.17553613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=cibc, env=uat, host=cibcUMAcollabrpssl1, installation=cibcUMAcollab, instance=cibcUMAcollabrpssl1:1936, job=HAProxy, master=cibcrqsuatapp1.croesus.com, origin_prometheus=cibcUMAcollabrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.175449459Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:15.175404092Z caller=remote_alert_sender.go:94 user=513718 slug=nakamura08 host=nakamura08-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.36.61:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=de1a4ed9-906c-426a-bfb8-7147bd7d6b8d alerts=1 + logger=ngalert.state.manager user=846919 slug=aimazing instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.175290259Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.175273758Z caller=remote_instance_store.go:51 user=375798 slug=beeworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=846919 slug=aimazing instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.175275238Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.52, job=ap_odessey" t=2024-05-29T13:44:15.175288733Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=846919 slug=aimazing instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.175265978Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=102.129.145.9:9998, ip=102.129.145.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.175266681Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.175227392Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.51, job=ap_it" t=2024-05-29T13:44:15.175162281Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.175087885Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.91.51, job=ap_it" t=2024-05-29T13:44:15.175148634Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:15.174844927Z level=debug msg="State manager processing evaluation results" resultCount=84 + logger=ngalert.state.manager.persist user=313382 slug=hyai t=2024-05-29T13:44:15.175045522Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=313382 slug=hyai instance="metric.name=value_execution_count_aggregate, resource.label.function_name=hyai-dev-kiwa-update-core-tables" t=2024-05-29T13:44:15.175003432Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.174961486Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.174910992Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.174775159Z caller=remote_instance_store.go:51 user=538962 slug=innovaciondigitalbcs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.174799877Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.174700666Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538962 slug=innovaciondigitalbcs t=2024-05-29T13:44:15.174704258Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=292121 slug=constellationtherapy version=2 fingerprint=1b2e31ade320b00d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.17458367Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.174236963s EvaluationString:}]" duration=43.537819ms + logger=ngalert.state.manager user=313382 slug=hyai t=2024-05-29T13:44:15.174547781Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-ce9df02a-a0f8-41fe-9582-cfe03afb7203" t=2024-05-29T13:44:15.174615669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538962 slug=innovaciondigitalbcs t=2024-05-29T13:44:15.174622258Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Alerta de enrolamiento de usuarios" + logger=ngalert.state.manager user=538962 slug=innovaciondigitalbcs t=2024-05-29T13:44:15.174589457Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-ce9df02a-a0f8-41fe-9582-cfe03afb7203" t=2024-05-29T13:44:15.174595411Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.174605691Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:15.174589912Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:15.174578572Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=647444 slug=nttaidth135160 t=2024-05-29T13:44:15.17453455Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.617145ms + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.50.222, job=ups_gent" t=2024-05-29T13:44:15.174576889Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=361282 slug=turing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.174471933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=361282 slug=turing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.174463489Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=361282 slug=turing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.174448581Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.174414032Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=361282 slug=turing t=2024-05-29T13:44:15.174376392Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=cdbn, env=prod, host=cdbnrpssl1, installation=cdbnprod, instance=cdbnrpssl1:1936, job=HAProxy, master=clientapp6.croesus.com, origin_prometheus=cdbnrpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.174449909Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=82372 slug=fout t=2024-05-29T13:44:15.174399509Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=523054 slug=vialtopartners version=60 fingerprint=42010be034645c7d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.174378731Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc0592e9778} C:{Var:C Labels: Value:0xc0592e9780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.17412497s EvaluationString:[ var='B' labels={} value=1 ], [ var='C' labels={} value=0 ]}]" duration=301.759388ms + level=debug 
ts=2024-05-29T13:44:15.174381074Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.174373116Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.40.11, job=paxton_gent_tw" t=2024-05-29T13:44:15.174323943Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=up, client=bncWeb, env=uat, host=bncWebuatv9rpssl1, installation=bncWebuatv9, instance=bncWebuatv9rpssl1:1936, job=HAProxy, master=nbcnlobuatapp2.croesus.com, origin_prometheus=bncWebuatv9rpssl1, site=viger, type=proxy" t=2024-05-29T13:44:15.174368757Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=102.129.145.7:9998, ip=102.129.145.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.174145465Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.174074468Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.11.62, job=ZoomRoom Wibree" t=2024-05-29T13:44:15.174063916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.11.61, job=ZoomRoom Twiggy" t=2024-05-29T13:44:15.173906074Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=171897 slug=croesus version=7 fingerprint=d46a5c62718c826b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.171602873Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=up, client=bncWeb, env=prod, host=bncWebrpssl1, installation=bncWebprod, instance=bncWebrpssl1:1936, job=HAProxy, master=webadvisorapp1.croesus.com, origin_prometheus=bncWebrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=bncWeb, env=prod, host=bncWebrpssl1, installation=bncWebprod, instance=bncWebrpssl1:1936, job=HAProxy, master=webadvisorapp1.croesus.com, origin_prometheus=bncWebrpssl1, site=viger, type=proxy Value:0xc01cd19228} B:{Var:B Labels:__name__=up, client=bncWeb, env=prod, host=bncWebrpssl1, installation=bncWebprod, instance=bncWebrpssl1:1936, job=HAProxy, master=webadvisorapp1.croesus.com, origin_prometheus=bncWebrpssl1, site=viger, type=proxy Value:0xc01cd19148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165531634s EvaluationString:[ var='A' labels={__name__=up, client=bncWeb, env=prod, host=bncWebrpssl1, installation=bncWebprod, instance=bncWebrpssl1:1936, job=HAProxy, master=webadvisorapp1.croesus.com, origin_prometheus=bncWebrpssl1, site=viger, 
type=proxy} value=1 ], [ var='B' labels={__name__=up, client=bncWeb, env=prod, host=bncWebrpssl1, installation=bncWebprod, instance=bncWebrpssl1:1936, job=HAProxy, master=webadvisorapp1.croesus.com, origin_prometheus=bncWebrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=bncWeb, env=prod, host=bncwebffproxy1, installation=bncWebprod, instance=bncwebffproxy1:1936, job=HAProxy, master=fbnapp5.croesus.com, origin_prometheus=bncwebffproxy1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=bncWeb, env=prod, host=bncwebffproxy1, installation=bncWebprod, instance=bncwebffproxy1:1936, job=HAProxy, master=fbnapp5.croesus.com, origin_prometheus=bncwebffproxy1, site=viger, type=proxy Value:0xc01cd19360} B:{Var:B Labels:__name__=up, client=bncWeb, env=prod, host=bncwebffproxy1, installation=bncWebprod, instance=bncwebffproxy1:1936, job=HAProxy, master=fbnapp5.croesus.com, origin_prometheus=bncwebffproxy1, site=viger, type=proxy Value:0xc01cd19400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165552434s EvaluationString:[ var='A' labels={__name__=up, client=bncWeb, env=prod, host=bncwebffproxy1, installation=bncWebprod, instance=bncwebffproxy1:1936, job=HAProxy, master=fbnapp5.croesus.com, origin_prometheus=bncwebffproxy1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=bncWeb, env=prod, host=bncwebffproxy1, installation=bncWebprod, instance=bncwebffproxy1:1936, job=HAProxy, master=fbnapp5.croesus.com, origin_prometheus=bncwebffproxy1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=bncWeb, env=uat, host=bncWebuatv9ffproxy1, installation=bncWebuatv9, instance=bncWebuatv9ffproxy1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=bncWebuatv9ffproxy1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=bncWeb, env=uat, host=bncWebuatv9ffproxy1, installation=bncWebuatv9, instance=bncWebuatv9ffproxy1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=bncWebuatv9ffproxy1, site=viger, type=proxy Value:0xc01cd19510} B:{Var:B Labels:__name__=up, client=bncWeb, env=uat, host=bncWebuatv9ffproxy1, installation=bncWebuatv9, instance=bncWebuatv9ffproxy1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=bncWebuatv9ffproxy1, site=viger, type=proxy Value:0xc01cd19598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165560309s EvaluationString:[ var='A' labels={__name__=up, client=bncWeb, env=uat, host=bncWebuatv9ffproxy1, installation=bncWebuatv9, instance=bncWebuatv9ffproxy1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=bncWebuatv9ffproxy1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=bncWeb, env=uat, host=bncWebuatv9ffproxy1, installation=bncWebuatv9, instance=bncWebuatv9ffproxy1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=bncWebuatv9ffproxy1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=bncWeb, env=uat, host=bncWebuatv9rpssl1, installation=bncWebuatv9, instance=bncWebuatv9rpssl1:1936, job=HAProxy, master=nbcnlobuatapp2.croesus.com, origin_prometheus=bncWebuatv9rpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=bncWeb, env=uat, host=bncWebuatv9rpssl1, installation=bncWebuatv9, instance=bncWebuatv9rpssl1:1936, job=HAProxy, master=nbcnlobuatapp2.croesus.com, origin_prometheus=bncWebuatv9rpssl1, site=viger, 
type=proxy Value:0xc01cd19828} B:{Var:B Labels:__name__=up, client=bncWeb, env=uat, host=bncWebuatv9rpssl1, installation=bncWebuatv9, instance=bncWebuatv9rpssl1:1936, job=HAProxy, master=nbcnlobuatapp2.croesus.com, origin_prometheus=bncWebuatv9rpssl1, site=viger, type=proxy Value:0xc01cd198b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165570976s EvaluationString:[ var='A' labels={__name__=up, client=bncWeb, env=uat, host=bncWebuatv9rpssl1, installation=bncWebuatv9, instance=bncWebuatv9rpssl1:1936, job=HAProxy, master=nbcnlobuatapp2.croesus.com, origin_prometheus=bncWebuatv9rpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=bncWeb, env=uat, host=bncWebuatv9rpssl1, installation=bncWebuatv9, instance=bncWebuatv9rpssl1:1936, job=HAProxy, master=nbcnlobuatapp2.croesus.com, origin_prometheus=bncWebuatv9rpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cdbn, env=prod, host=cdbnrpssl1, installation=cdbnprod, instance=cdbnrpssl1:1936, job=HAProxy, master=clientapp6.croesus.com, origin_prometheus=cdbnrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cdbn, env=prod, host=cdbnrpssl1, installation=cdbnprod, instance=cdbnrpssl1:1936, job=HAProxy, master=clientapp6.croesus.com, origin_prometheus=cdbnrpssl1, site=viger, type=proxy Value:0xc01cd19af0} B:{Var:B Labels:__name__=up, client=cdbn, env=prod, host=cdbnrpssl1, installation=cdbnprod, instance=cdbnrpssl1:1936, job=HAProxy, master=clientapp6.croesus.com, origin_prometheus=cdbnrpssl1, site=viger, type=proxy Value:0xc01cd19a10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165578002s EvaluationString:[ var='A' labels={__name__=up, client=cdbn, env=prod, host=cdbnrpssl1, installation=cdbnprod, instance=cdbnrpssl1:1936, job=HAProxy, master=clientapp6.croesus.com, origin_prometheus=cdbnrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cdbn, env=prod, host=cdbnrpssl1, installation=cdbnprod, instance=cdbnrpssl1:1936, job=HAProxy, master=clientapp6.croesus.com, origin_prometheus=cdbnrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cdbn, env=prod, host=cdbnrpssl2, installation=cdbnprod, instance=cdbnrpssl2:1936, job=HAProxy, master=clientapp2.croesus.com, origin_prometheus=cdbnrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cdbn, env=prod, host=cdbnrpssl2, installation=cdbnprod, instance=cdbnrpssl2:1936, job=HAProxy, master=clientapp2.croesus.com, origin_prometheus=cdbnrpssl2, site=viger, type=proxy Value:0xc01cd19c68} B:{Var:B Labels:__name__=up, client=cdbn, env=prod, host=cdbnrpssl2, installation=cdbnprod, instance=cdbnrpssl2:1936, job=HAProxy, master=clientapp2.croesus.com, origin_prometheus=cdbnrpssl2, site=viger, type=proxy Value:0xc01cd19e28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165587137s EvaluationString:[ var='A' labels={__name__=up, client=cdbn, env=prod, host=cdbnrpssl2, installation=cdbnprod, instance=cdbnrpssl2:1936, job=HAProxy, master=clientapp2.croesus.com, origin_prometheus=cdbnrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cdbn, env=prod, host=cdbnrpssl2, installation=cdbnprod, instance=cdbnrpssl2:1936, job=HAProxy, master=clientapp2.croesus.com, origin_prometheus=cdbnrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibc, env=prod, host=cibcrpssl1, installation=cibcprod, 
instance=cibcrpssl1:1936, job=HAProxy, master=cibcapp1.croesus.com, origin_prometheus=cibcrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibc, env=prod, host=cibcrpssl1, installation=cibcprod, instance=cibcrpssl1:1936, job=HAProxy, master=cibcapp1.croesus.com, origin_prometheus=cibcrpssl1, site=viger, type=proxy Value:0xc011afe0b8} B:{Var:B Labels:__name__=up, client=cibc, env=prod, host=cibcrpssl1, installation=cibcprod, instance=cibcrpssl1:1936, job=HAProxy, master=cibcapp1.croesus.com, origin_prometheus=cibcrpssl1, site=viger, type=proxy Value:0xc011afe008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165593908s EvaluationString:[ var='A' labels={__name__=up, client=cibc, env=prod, host=cibcrpssl1, installation=cibcprod, instance=cibcrpssl1:1936, job=HAProxy, master=cibcapp1.croesus.com, origin_prometheus=cibcrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibc, env=prod, host=cibcrpssl1, installation=cibcprod, instance=cibcrpssl1:1936, job=HAProxy, master=cibcapp1.croesus.com, origin_prometheus=cibcrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibc, env=prod, host=cibcrpssl2, installation=cibcprod, instance=cibcrpssl2:1936, job=HAProxy, master=cibcapp2.croesus.com, origin_prometheus=cibcrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibc, env=prod, host=cibcrpssl2, installation=cibcprod, instance=cibcrpssl2:1936, job=HAProxy, master=cibcapp2.croesus.com, origin_prometheus=cibcrpssl2, site=viger, type=proxy Value:0xc011afe588} B:{Var:B Labels:__name__=up, client=cibc, env=prod, host=cibcrpssl2, installation=cibcprod, instance=cibcrpssl2:1936, job=HAProxy, master=cibcapp2.croesus.com, origin_prometheus=cibcrpssl2, site=viger, type=proxy Value:0xc011afe638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165602921s EvaluationString:[ var='A' labels={__name__=up, client=cibc, env=prod, host=cibcrpssl2, installation=cibcprod, instance=cibcrpssl2:1936, job=HAProxy, master=cibcapp2.croesus.com, origin_prometheus=cibcrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibc, env=prod, host=cibcrpssl2, installation=cibcprod, instance=cibcrpssl2:1936, job=HAProxy, master=cibcapp2.croesus.com, origin_prometheus=cibcrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibc, env=uat, host=cibcPATrpssl1, installation=cibcPAT, instance=cibcPATrpssl1:1936, job=HAProxy, master=cibcuatapp1.croesus.com, origin_prometheus=cibcPATrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibc, env=uat, host=cibcPATrpssl1, installation=cibcPAT, instance=cibcPATrpssl1:1936, job=HAProxy, master=cibcuatapp1.croesus.com, origin_prometheus=cibcPATrpssl1, site=viger, type=proxy Value:0xc011afe798} B:{Var:B Labels:__name__=up, client=cibc, env=uat, host=cibcPATrpssl1, installation=cibcPAT, instance=cibcPATrpssl1:1936, job=HAProxy, master=cibcuatapp1.croesus.com, origin_prometheus=cibcPATrpssl1, site=viger, type=proxy Value:0xc011afe880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165609715s EvaluationString:[ var='A' labels={__name__=up, client=cibc, env=uat, host=cibcPATrpssl1, installation=cibcPAT, instance=cibcPATrpssl1:1936, job=HAProxy, master=cibcuatapp1.croesus.com, origin_prometheus=cibcPATrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, 
client=cibc, env=uat, host=cibcPATrpssl1, installation=cibcPAT, instance=cibcPATrpssl1:1936, job=HAProxy, master=cibcuatapp1.croesus.com, origin_prometheus=cibcPATrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibc, env=uat, host=cibcPATrpssl2, installation=cibcPAT, instance=cibcPATrpssl2:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcPATrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibc, env=uat, host=cibcPATrpssl2, installation=cibcPAT, instance=cibcPATrpssl2:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcPATrpssl2, site=viger, type=proxy Value:0xc011afea60} B:{Var:B Labels:__name__=up, client=cibc, env=uat, host=cibcPATrpssl2, installation=cibcPAT, instance=cibcPATrpssl2:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcPATrpssl2, site=viger, type=proxy Value:0xc011afece0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.16561722s EvaluationString:[ var='A' labels={__name__=up, client=cibc, env=uat, host=cibcPATrpssl2, installation=cibcPAT, instance=cibcPATrpssl2:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcPATrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibc, env=uat, host=cibcPATrpssl2, installation=cibcPAT, instance=cibcPATrpssl2:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcPATrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibc, env=uat, host=cibcUMAcollabrpssl1, installation=cibcUMAcollab, instance=cibcUMAcollabrpssl1:1936, job=HAProxy, master=cibcrqsuatapp1.croesus.com, origin_prometheus=cibcUMAcollabrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibc, env=uat, host=cibcUMAcollabrpssl1, installation=cibcUMAcollab, instance=cibcUMAcollabrpssl1:1936, job=HAProxy, master=cibcrqsuatapp1.croesus.com, origin_prometheus=cibcUMAcollabrpssl1, site=viger, type=proxy Value:0xc011afee08} B:{Var:B Labels:__name__=up, client=cibc, env=uat, host=cibcUMAcollabrpssl1, installation=cibcUMAcollab, instance=cibcUMAcollabrpssl1:1936, job=HAProxy, master=cibcrqsuatapp1.croesus.com, origin_prometheus=cibcUMAcollabrpssl1, site=viger, type=proxy Value:0xc011afee90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.1656245s EvaluationString:[ var='A' labels={__name__=up, client=cibc, env=uat, host=cibcUMAcollabrpssl1, installation=cibcUMAcollab, instance=cibcUMAcollabrpssl1:1936, job=HAProxy, master=cibcrqsuatapp1.croesus.com, origin_prometheus=cibcUMAcollabrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibc, env=uat, host=cibcUMAcollabrpssl1, installation=cibcUMAcollab, instance=cibcUMAcollabrpssl1:1936, job=HAProxy, master=cibcrqsuatapp1.croesus.com, origin_prometheus=cibcUMAcollabrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibc, env=uat, host=cibcuatv9bkvrpssl1, installation=cibcuatv9bkv, instance=cibcuatv9bkvrpssl1:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcuatv9bkvrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibc, env=uat, host=cibcuatv9bkvrpssl1, installation=cibcuatv9bkv, instance=cibcuatv9bkvrpssl1:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcuatv9bkvrpssl1, site=viger, type=proxy Value:0xc011afef90} B:{Var:B Labels:__name__=up, client=cibc, 
env=uat, host=cibcuatv9bkvrpssl1, installation=cibcuatv9bkv, instance=cibcuatv9bkvrpssl1:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcuatv9bkvrpssl1, site=viger, type=proxy Value:0xc011aff008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165631832s EvaluationString:[ var='A' labels={__name__=up, client=cibc, env=uat, host=cibcuatv9bkvrpssl1, installation=cibcuatv9bkv, instance=cibcuatv9bkvrpssl1:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcuatv9bkvrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibc, env=uat, host=cibcuatv9bkvrpssl1, installation=cibcuatv9bkv, instance=cibcuatv9bkvrpssl1:1936, job=HAProxy, master=cibcuatapp3.croesus.com, origin_prometheus=cibcuatv9bkvrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl1, installation=cibcisiprod, instance=cibcisiprodrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl1, installation=cibcisiprod, instance=cibcisiprodrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl1, site=viger, type=proxy Value:0xc011aff138} B:{Var:B Labels:__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl1, installation=cibcisiprod, instance=cibcisiprodrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl1, site=viger, type=proxy Value:0xc011aff1c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165638118s EvaluationString:[ var='A' labels={__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl1, installation=cibcisiprod, instance=cibcisiprodrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl1, installation=cibcisiprod, instance=cibcisiprodrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl2, installation=cibcisiprod, instance=cibcisiprodrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl2, installation=cibcisiprod, instance=cibcisiprodrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl2, site=viger, type=proxy Value:0xc011aff488} B:{Var:B Labels:__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl2, installation=cibcisiprod, instance=cibcisiprodrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl2, site=viger, type=proxy Value:0xc011aff348}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165646302s EvaluationString:[ var='A' labels={__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl2, installation=cibcisiprod, instance=cibcisiprodrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibcisi, env=prod, host=cibcisiprodrpssl2, installation=cibcisiprod, instance=cibcisiprodrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisiprodrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibcisi, env=uat, 
host=cibcisipatrpssl1, installation=cibcisipat, instance=cibcisipatrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl1, installation=cibcisipat, instance=cibcisipatrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl1, site=viger, type=proxy Value:0xc011aff5e8} B:{Var:B Labels:__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl1, installation=cibcisipat, instance=cibcisipatrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl1, site=viger, type=proxy Value:0xc011aff6a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165652363s EvaluationString:[ var='A' labels={__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl1, installation=cibcisipat, instance=cibcisipatrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl1, installation=cibcisipat, instance=cibcisipatrpssl1:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl2, installation=cibcisipat, instance=cibcisipatrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl2, installation=cibcisipat, instance=cibcisipatrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl2, site=viger, type=proxy Value:0xc011aff980} B:{Var:B Labels:__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl2, installation=cibcisipat, instance=cibcisipatrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl2, site=viger, type=proxy Value:0xc011aff810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165658277s EvaluationString:[ var='A' labels={__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl2, installation=cibcisipat, instance=cibcisipatrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=cibcisi, env=uat, host=cibcisipatrpssl2, installation=cibcisipat, instance=cibcisipatrpssl2:1936, job=HAProxy, master=unknown, origin_prometheus=cibcisipatrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=claret, env=uat, host=claretPATrpssl1, installation=claretPAT, instance=claretPATrpssl1:1936, job=HAProxy, master=clientuatapp3.croesus.com, origin_prometheus=claretPATrpssl1, site=viger, type=proxy State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=claret, env=uat, host=claretPATrpssl1, installation=claretPAT, instance=claretPATrpssl1:1936, job=HAProxy, master=clientuatapp3.croesus.com, origin_prometheus=claretPATrpssl1, site=viger, type=proxy Value:0xc011affcd0} B:{Var:B Labels:__name__=up, client=claret, env=uat, host=claretPATrpssl1, installation=claretPAT, instance=claretPATrpssl1:1936, job=HAProxy, master=clientuatapp3.croesus.com, origin_prometheus=claretPATrpssl1, site=viger, type=proxy Value:0xc011affd90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165664455s EvaluationString:[ var='A' labels={__name__=up, client=claret, env=uat, host=claretPATrpssl1, installation=claretPAT, 
instance=claretPATrpssl1:1936, job=HAProxy, master=clientuatapp3.croesus.com, origin_prometheus=claretPATrpssl1, site=viger, type=proxy} value=0 ], [ var='B' labels={__name__=up, client=claret, env=uat, host=claretPATrpssl1, installation=claretPAT, instance=claretPATrpssl1:1936, job=HAProxy, master=clientuatapp3.croesus.com, origin_prometheus=claretPATrpssl1, site=viger, type=proxy} value=1 ]} {Instance:__name__=up, client=croesus, env=test, host=mobileStagingProxy1, installation=mobileStaging, instance=mobileStagingProxy1:1936, job=HAProxy, master=testapp2.croesus.com, origin_prometheus=mobileStagingProxy1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=croesus, env=test, host=mobileStagingProxy1, installation=mobileStaging, instance=mobileStagingProxy1:1936, job=HAProxy, master=testapp2.croesus.com, origin_prometheus=mobileStagingProxy1, site=viger, type=proxy Value:0xc011affea8} B:{Var:B Labels:__name__=up, client=croesus, env=test, host=mobileStagingProxy1, installation=mobileStaging, instance=mobileStagingProxy1:1936, job=HAProxy, master=testapp2.croesus.com, origin_prometheus=mobileStagingProxy1, site=viger, type=proxy Value:0xc011afff30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165671905s EvaluationString:[ var='A' labels={__name__=up, client=croesus, env=test, host=mobileStagingProxy1, installation=mobileStaging, instance=mobileStagingProxy1:1936, job=HAProxy, master=testapp2.croesus.com, origin_prometheus=mobileStagingProxy1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=croesus, env=test, host=mobileStagingProxy1, installation=mobileStaging, instance=mobileStagingProxy1:1936, job=HAProxy, master=testapp2.croesus.com, origin_prometheus=mobileStagingProxy1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=croesus, env=test, host=passerelledemoawsproxy1, installation=passerelledemoaws, instance=passerelledemoawsproxy1:1936, job=HAProxy, master=unknown, origin_prometheus=passerelledemoawsproxy1, site=aws, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=croesus, env=test, host=passerelledemoawsproxy1, installation=passerelledemoaws, instance=passerelledemoawsproxy1:1936, job=HAProxy, master=unknown, origin_prometheus=passerelledemoawsproxy1, site=aws, type=proxy Value:0xc00fa02020} B:{Var:B Labels:__name__=up, client=croesus, env=test, host=passerelledemoawsproxy1, installation=passerelledemoaws, instance=passerelledemoawsproxy1:1936, job=HAProxy, master=unknown, origin_prometheus=passerelledemoawsproxy1, site=aws, type=proxy Value:0xc00fa020b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165677865s EvaluationString:[ var='A' labels={__name__=up, client=croesus, env=test, host=passerelledemoawsproxy1, installation=passerelledemoaws, instance=passerelledemoawsproxy1:1936, job=HAProxy, master=unknown, origin_prometheus=passerelledemoawsproxy1, site=aws, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=croesus, env=test, host=passerelledemoawsproxy1, installation=passerelledemoaws, instance=passerelledemoawsproxy1:1936, job=HAProxy, master=unknown, origin_prometheus=passerelledemoawsproxy1, site=aws, type=proxy} value=0 ]} {Instance:__name__=up, client=fbn, env=prod, host=fbnrpssl1, installation=fbnprod, instance=fbnrpssl1:1936, job=HAProxy, master=fbnloader1.croesus.com, origin_prometheus=fbnrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:__name__=up, client=fbn, env=prod, host=fbnrpssl1, installation=fbnprod, instance=fbnrpssl1:1936, job=HAProxy, master=fbnloader1.croesus.com, origin_prometheus=fbnrpssl1, site=viger, type=proxy Value:0xc00fa02208} B:{Var:B Labels:__name__=up, client=fbn, env=prod, host=fbnrpssl1, installation=fbnprod, instance=fbnrpssl1:1936, job=HAProxy, master=fbnloader1.croesus.com, origin_prometheus=fbnrpssl1, site=viger, type=proxy Value:0xc00fa022a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165684967s EvaluationString:[ var='A' labels={__name__=up, client=fbn, env=prod, host=fbnrpssl1, installation=fbnprod, instance=fbnrpssl1:1936, job=HAProxy, master=fbnloader1.croesus.com, origin_prometheus=fbnrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=fbn, env=prod, host=fbnrpssl1, installation=fbnprod, instance=fbnrpssl1:1936, job=HAProxy, master=fbnloader1.croesus.com, origin_prometheus=fbnrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=fbn, env=prod, host=fbnrpssl2, installation=fbnprod, instance=fbnrpssl2:1936, job=HAProxy, master=fbnapp2.croesus.com, origin_prometheus=fbnrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=fbn, env=prod, host=fbnrpssl2, installation=fbnprod, instance=fbnrpssl2:1936, job=HAProxy, master=fbnapp2.croesus.com, origin_prometheus=fbnrpssl2, site=viger, type=proxy Value:0xc00fa023e0} B:{Var:B Labels:__name__=up, client=fbn, env=prod, host=fbnrpssl2, installation=fbnprod, instance=fbnrpssl2:1936, job=HAProxy, master=fbnapp2.croesus.com, origin_prometheus=fbnrpssl2, site=viger, type=proxy Value:0xc00fa02478}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.16569237s EvaluationString:[ var='A' labels={__name__=up, client=fbn, env=prod, host=fbnrpssl2, installation=fbnprod, instance=fbnrpssl2:1936, job=HAProxy, master=fbnapp2.croesus.com, origin_prometheus=fbnrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=fbn, env=prod, host=fbnrpssl2, installation=fbnprod, instance=fbnrpssl2:1936, job=HAProxy, master=fbnapp2.croesus.com, origin_prometheus=fbnrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=fbn, env=uat, host=fbnPATrpssl1, installation=fbnPAT, instance=fbnPATrpssl1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=fbnPATrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=fbn, env=uat, host=fbnPATrpssl1, installation=fbnPAT, instance=fbnPATrpssl1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=fbnPATrpssl1, site=viger, type=proxy Value:0xc00fa02598} B:{Var:B Labels:__name__=up, client=fbn, env=uat, host=fbnPATrpssl1, installation=fbnPAT, instance=fbnPATrpssl1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=fbnPATrpssl1, site=viger, type=proxy Value:0xc00fa02628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.16569813s EvaluationString:[ var='A' labels={__name__=up, client=fbn, env=uat, host=fbnPATrpssl1, installation=fbnPAT, instance=fbnPATrpssl1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=fbnPATrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=fbn, env=uat, host=fbnPATrpssl1, installation=fbnPAT, instance=fbnPATrpssl1:1936, job=HAProxy, master=fbnuatapp1.croesus.com, origin_prometheus=fbnPATrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=fbn, env=uat, 
host=fbnPATrpssl2, installation=fbnPAT, instance=fbnPATrpssl2:1936, job=HAProxy, master=fbnuatapp4.croesus.com, origin_prometheus=fbnPATrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=fbn, env=uat, host=fbnPATrpssl2, installation=fbnPAT, instance=fbnPATrpssl2:1936, job=HAProxy, master=fbnuatapp4.croesus.com, origin_prometheus=fbnPATrpssl2, site=viger, type=proxy Value:0xc00fa02750} B:{Var:B Labels:__name__=up, client=fbn, env=uat, host=fbnPATrpssl2, installation=fbnPAT, instance=fbnPATrpssl2:1936, job=HAProxy, master=fbnuatapp4.croesus.com, origin_prometheus=fbnPATrpssl2, site=viger, type=proxy Value:0xc00fa027f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165704702s EvaluationString:[ var='A' labels={__name__=up, client=fbn, env=uat, host=fbnPATrpssl2, installation=fbnPAT, instance=fbnPATrpssl2:1936, job=HAProxy, master=fbnuatapp4.croesus.com, origin_prometheus=fbnPATrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=fbn, env=uat, host=fbnPATrpssl2, installation=fbnPAT, instance=fbnPATrpssl2:1936, job=HAProxy, master=fbnuatapp4.croesus.com, origin_prometheus=fbnPATrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=gpd, env=prod, host=gpdrpssl1, installation=gpdprod, instance=gpdrpssl1:1936, job=HAProxy, master=vmdapp2.croesus.com, origin_prometheus=gpdrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=gpd, env=prod, host=gpdrpssl1, installation=gpdprod, instance=gpdrpssl1:1936, job=HAProxy, master=vmdapp2.croesus.com, origin_prometheus=gpdrpssl1, site=viger, type=proxy Value:0xc00fa02920} B:{Var:B Labels:__name__=up, client=gpd, env=prod, host=gpdrpssl1, installation=gpdprod, instance=gpdrpssl1:1936, job=HAProxy, master=vmdapp2.croesus.com, origin_prometheus=gpdrpssl1, site=viger, type=proxy Value:0xc00fa029b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165714221s EvaluationString:[ var='A' labels={__name__=up, client=gpd, env=prod, host=gpdrpssl1, installation=gpdprod, instance=gpdrpssl1:1936, job=HAProxy, master=vmdapp2.croesus.com, origin_prometheus=gpdrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=gpd, env=prod, host=gpdrpssl1, installation=gpdprod, instance=gpdrpssl1:1936, job=HAProxy, master=vmdapp2.croesus.com, origin_prometheus=gpdrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=gpd, env=prod, host=gpdrpssl2, installation=gpdprod, instance=gpdrpssl2:1936, job=HAProxy, master=vmdapp4.croesus.com, origin_prometheus=gpdrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=gpd, env=prod, host=gpdrpssl2, installation=gpdprod, instance=gpdrpssl2:1936, job=HAProxy, master=vmdapp4.croesus.com, origin_prometheus=gpdrpssl2, site=viger, type=proxy Value:0xc00fa02af0} B:{Var:B Labels:__name__=up, client=gpd, env=prod, host=gpdrpssl2, installation=gpdprod, instance=gpdrpssl2:1936, job=HAProxy, master=vmdapp4.croesus.com, origin_prometheus=gpdrpssl2, site=viger, type=proxy Value:0xc00fa02b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.16572125s EvaluationString:[ var='A' labels={__name__=up, client=gpd, env=prod, host=gpdrpssl2, installation=gpdprod, instance=gpdrpssl2:1936, job=HAProxy, master=vmdapp4.croesus.com, origin_prometheus=gpdrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=gpd, env=prod, host=gpdrpssl2, 
installation=gpdprod, instance=gpdrpssl2:1936, job=HAProxy, master=vmdapp4.croesus.com, origin_prometheus=gpdrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=gpd, env=uat, host=gpdPATproxy1, installation=gpdPAT, instance=gpdPATproxy1:1936, job=HAProxy, master=vmduatapp1.croesus.com, origin_prometheus=gpdPATproxy1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=gpd, env=uat, host=gpdPATproxy1, installation=gpdPAT, instance=gpdPATproxy1:1936, job=HAProxy, master=vmduatapp1.croesus.com, origin_prometheus=gpdPATproxy1, site=viger, type=proxy Value:0xc00fa02ce0} B:{Var:B Labels:__name__=up, client=gpd, env=uat, host=gpdPATproxy1, installation=gpdPAT, instance=gpdPATproxy1:1936, job=HAProxy, master=vmduatapp1.croesus.com, origin_prometheus=gpdPATproxy1, site=viger, type=proxy Value:0xc00fa02d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165727525s EvaluationString:[ var='A' labels={__name__=up, client=gpd, env=uat, host=gpdPATproxy1, installation=gpdPAT, instance=gpdPATproxy1:1936, job=HAProxy, master=vmduatapp1.croesus.com, origin_prometheus=gpdPATproxy1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=gpd, env=uat, host=gpdPATproxy1, installation=gpdPAT, instance=gpdPATproxy1:1936, job=HAProxy, master=vmduatapp1.croesus.com, origin_prometheus=gpdPATproxy1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=gpd, env=uat, host=gpdPATproxy2, installation=gpdPAT, instance=gpdPATproxy2:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPATproxy2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=gpd, env=uat, host=gpdPATproxy2, installation=gpdPAT, instance=gpdPATproxy2:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPATproxy2, site=viger, type=proxy Value:0xc00fa02e98} B:{Var:B Labels:__name__=up, client=gpd, env=uat, host=gpdPATproxy2, installation=gpdPAT, instance=gpdPATproxy2:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPATproxy2, site=viger, type=proxy Value:0xc00fa02f28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165735022s EvaluationString:[ var='A' labels={__name__=up, client=gpd, env=uat, host=gpdPATproxy2, installation=gpdPAT, instance=gpdPATproxy2:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPATproxy2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=gpd, env=uat, host=gpdPATproxy2, installation=gpdPAT, instance=gpdPATproxy2:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPATproxy2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=gpd, env=uat, host=gpdPPrpssl1, installation=gpdPP, instance=gpdPPrpssl1:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPPrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=gpd, env=uat, host=gpdPPrpssl1, installation=gpdPP, instance=gpdPPrpssl1:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPPrpssl1, site=viger, type=proxy Value:0xc00fa03090} B:{Var:B Labels:__name__=up, client=gpd, env=uat, host=gpdPPrpssl1, installation=gpdPP, instance=gpdPPrpssl1:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPPrpssl1, site=viger, type=proxy Value:0xc00fa03150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.16574093s EvaluationString:[ var='A' 
labels={__name__=up, client=gpd, env=uat, host=gpdPPrpssl1, installation=gpdPP, instance=gpdPPrpssl1:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPPrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=gpd, env=uat, host=gpdPPrpssl1, installation=gpdPP, instance=gpdPPrpssl1:1936, job=HAProxy, master=vmduatapp2.croesus.com, origin_prometheus=gpdPPrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=harbour, env=uat, host=harbouruatawsrpssl1, installation=harbouruataws, instance=harbouruatawsrpssl1:1936, job=HAProxy, master=harbouruatawsproxymaitre, origin_prometheus=harbouruatawsrpssl1, site=aws, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=harbour, env=uat, host=harbouruatawsrpssl1, installation=harbouruataws, instance=harbouruatawsrpssl1:1936, job=HAProxy, master=harbouruatawsproxymaitre, origin_prometheus=harbouruatawsrpssl1, site=aws, type=proxy Value:0xc00fa03248} B:{Var:B Labels:__name__=up, client=harbour, env=uat, host=harbouruatawsrpssl1, installation=harbouruataws, instance=harbouruatawsrpssl1:1936, job=HAProxy, master=harbouruatawsproxymaitre, origin_prometheus=harbouruatawsrpssl1, site=aws, type=proxy Value:0xc00fa032c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165746878s EvaluationString:[ var='A' labels={__name__=up, client=harbour, env=uat, host=harbouruatawsrpssl1, installation=harbouruataws, instance=harbouruatawsrpssl1:1936, job=HAProxy, master=harbouruatawsproxymaitre, origin_prometheus=harbouruatawsrpssl1, site=aws, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=harbour, env=uat, host=harbouruatawsrpssl1, installation=harbouruataws, instance=harbouruatawsrpssl1:1936, job=HAProxy, master=harbouruatawsproxymaitre, origin_prometheus=harbouruatawsrpssl1, site=aws, type=proxy} value=0 ]} {Instance:__name__=up, client=iavm, env=uat, host=iavmPATrpssl1, installation=iavmPAT, instance=iavmPATrpssl1:1936, job=HAProxy, master=iavmuatapp1.croesus.com, origin_prometheus=iavmPATrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=iavm, env=uat, host=iavmPATrpssl1, installation=iavmPAT, instance=iavmPATrpssl1:1936, job=HAProxy, master=iavmuatapp1.croesus.com, origin_prometheus=iavmPATrpssl1, site=viger, type=proxy Value:0xc00fa03408} B:{Var:B Labels:__name__=up, client=iavm, env=uat, host=iavmPATrpssl1, installation=iavmPAT, instance=iavmPATrpssl1:1936, job=HAProxy, master=iavmuatapp1.croesus.com, origin_prometheus=iavmPATrpssl1, site=viger, type=proxy Value:0xc00fa034b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.16575312s EvaluationString:[ var='A' labels={__name__=up, client=iavm, env=uat, host=iavmPATrpssl1, installation=iavmPAT, instance=iavmPATrpssl1:1936, job=HAProxy, master=iavmuatapp1.croesus.com, origin_prometheus=iavmPATrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=iavm, env=uat, host=iavmPATrpssl1, installation=iavmPAT, instance=iavmPATrpssl1:1936, job=HAProxy, master=iavmuatapp1.croesus.com, origin_prometheus=iavmPATrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=iavm, env=uat, host=iavmPATrpssl2, installation=iavmPAT, instance=iavmPATrpssl2:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=iavmPATrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=iavm, env=uat, host=iavmPATrpssl2, 
installation=iavmPAT, instance=iavmPATrpssl2:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=iavmPATrpssl2, site=viger, type=proxy Value:0xc00fa035e8} B:{Var:B Labels:__name__=up, client=iavm, env=uat, host=iavmPATrpssl2, installation=iavmPAT, instance=iavmPATrpssl2:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=iavmPATrpssl2, site=viger, type=proxy Value:0xc00fa03688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165759413s EvaluationString:[ var='A' labels={__name__=up, client=iavm, env=uat, host=iavmPATrpssl2, installation=iavmPAT, instance=iavmPATrpssl2:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=iavmPATrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=iavm, env=uat, host=iavmPATrpssl2, installation=iavmPAT, instance=iavmPATrpssl2:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=iavmPATrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=ipcsec, env=prod, host=ipcsecawsrpssl1, installation=ipcsecaws, instance=ipcsecawsrpssl1:1936, job=HAProxy, master=ipcsecawsproxymaitre, origin_prometheus=ipcsecawsrpssl1, site=aws, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=ipcsec, env=prod, host=ipcsecawsrpssl1, installation=ipcsecaws, instance=ipcsecawsrpssl1:1936, job=HAProxy, master=ipcsecawsproxymaitre, origin_prometheus=ipcsecawsrpssl1, site=aws, type=proxy Value:0xc00fa037c0} B:{Var:B Labels:__name__=up, client=ipcsec, env=prod, host=ipcsecawsrpssl1, installation=ipcsecaws, instance=ipcsecawsrpssl1:1936, job=HAProxy, master=ipcsecawsproxymaitre, origin_prometheus=ipcsecawsrpssl1, site=aws, type=proxy Value:0xc00fa03860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165764838s EvaluationString:[ var='A' labels={__name__=up, client=ipcsec, env=prod, host=ipcsecawsrpssl1, installation=ipcsecaws, instance=ipcsecawsrpssl1:1936, job=HAProxy, master=ipcsecawsproxymaitre, origin_prometheus=ipcsecawsrpssl1, site=aws, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=ipcsec, env=prod, host=ipcsecawsrpssl1, installation=ipcsecaws, instance=ipcsecawsrpssl1:1936, job=HAProxy, master=ipcsecawsproxymaitre, origin_prometheus=ipcsecawsrpssl1, site=aws, type=proxy} value=0 ]} {Instance:__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl1, installation=ipcsecprod, instance=ipcsecrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=ipcsecrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl1, installation=ipcsecprod, instance=ipcsecrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=ipcsecrpssl1, site=viger, type=proxy Value:0xc00fa039c0} B:{Var:B Labels:__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl1, installation=ipcsecprod, instance=ipcsecrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=ipcsecrpssl1, site=viger, type=proxy Value:0xc00fa03a58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165773633s EvaluationString:[ var='A' labels={__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl1, installation=ipcsecprod, instance=ipcsecrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=ipcsecrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl1, installation=ipcsecprod, instance=ipcsecrpssl1:1936, 
job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=ipcsecrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl2, installation=ipcsecprod, instance=ipcsecrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=ipcsecrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl2, installation=ipcsecprod, instance=ipcsecrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=ipcsecrpssl2, site=viger, type=proxy Value:0xc00fa03bd0} B:{Var:B Labels:__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl2, installation=ipcsecprod, instance=ipcsecrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=ipcsecrpssl2, site=viger, type=proxy Value:0xc00fa03c68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165779488s EvaluationString:[ var='A' labels={__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl2, installation=ipcsecprod, instance=ipcsecrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=ipcsecrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=ipcsec, env=prod, host=ipcsecrpssl2, installation=ipcsecprod, instance=ipcsecrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=ipcsecrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=ipcsec, env=uat, host=ipcsecPPrpssl1, installation=ipcsecPP, instance=ipcsecPPrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=ipcsecPPrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=ipcsec, env=uat, host=ipcsecPPrpssl1, installation=ipcsecPP, instance=ipcsecPPrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=ipcsecPPrpssl1, site=viger, type=proxy Value:0xc00fa03e60} B:{Var:B Labels:__name__=up, client=ipcsec, env=uat, host=ipcsecPPrpssl1, installation=ipcsecPP, instance=ipcsecPPrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=ipcsecPPrpssl1, site=viger, type=proxy Value:0xc00fa03db8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165786386s EvaluationString:[ var='A' labels={__name__=up, client=ipcsec, env=uat, host=ipcsecPPrpssl1, installation=ipcsecPP, instance=ipcsecPPrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=ipcsecPPrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=ipcsec, env=uat, host=ipcsecPPrpssl1, installation=ipcsecPP, instance=ipcsecPPrpssl1:1936, job=HAProxy, master=clientuatapp2.croesus.com, origin_prometheus=ipcsecPPrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=ipcsec, env=uat, host=ipcsecrapportawsrpssl1, installation=ipcsecrapportaws, instance=ipcsecrapportawsrpssl1:1936, job=HAProxy, master=ipcsecrapportawsproxymaitre, origin_prometheus=ipcsecrapportawsrpssl1, site=aws, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=ipcsec, env=uat, host=ipcsecrapportawsrpssl1, installation=ipcsecrapportaws, instance=ipcsecrapportawsrpssl1:1936, job=HAProxy, master=ipcsecrapportawsproxymaitre, origin_prometheus=ipcsecrapportawsrpssl1, site=aws, type=proxy Value:0xc00fa03f80} B:{Var:B Labels:__name__=up, client=ipcsec, env=uat, host=ipcsecrapportawsrpssl1, installation=ipcsecrapportaws, instance=ipcsecrapportawsrpssl1:1936, job=HAProxy, 
master=ipcsecrapportawsproxymaitre, origin_prometheus=ipcsecrapportawsrpssl1, site=aws, type=proxy Value:0xc01df2e010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165791957s EvaluationString:[ var='A' labels={__name__=up, client=ipcsec, env=uat, host=ipcsecrapportawsrpssl1, installation=ipcsecrapportaws, instance=ipcsecrapportawsrpssl1:1936, job=HAProxy, master=ipcsecrapportawsproxymaitre, origin_prometheus=ipcsecrapportawsrpssl1, site=aws, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=ipcsec, env=uat, host=ipcsecrapportawsrpssl1, installation=ipcsecrapportaws, instance=ipcsecrapportawsrpssl1:1936, job=HAProxy, master=ipcsecrapportawsproxymaitre, origin_prometheus=ipcsecrapportawsrpssl1, site=aws, type=proxy} value=0 ]} {Instance:__name__=up, client=ipcsec, env=uat, host=ipcsecuatawsrpssl1, installation=ipcsecuataws, instance=ipcsecuatawsrpssl1:1936, job=HAProxy, master=ipcsecuatawsproxymaitre, origin_prometheus=ipcsecuatawsrpssl1, site=aws, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=ipcsec, env=uat, host=ipcsecuatawsrpssl1, installation=ipcsecuataws, instance=ipcsecuatawsrpssl1:1936, job=HAProxy, master=ipcsecuatawsproxymaitre, origin_prometheus=ipcsecuatawsrpssl1, site=aws, type=proxy Value:0xc01df2e188} B:{Var:B Labels:__name__=up, client=ipcsec, env=uat, host=ipcsecuatawsrpssl1, installation=ipcsecuataws, instance=ipcsecuatawsrpssl1:1936, job=HAProxy, master=ipcsecuatawsproxymaitre, origin_prometheus=ipcsecuatawsrpssl1, site=aws, type=proxy Value:0xc01df2e108}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165798655s EvaluationString:[ var='A' labels={__name__=up, client=ipcsec, env=uat, host=ipcsecuatawsrpssl1, installation=ipcsecuataws, instance=ipcsecuatawsrpssl1:1936, job=HAProxy, master=ipcsecuatawsproxymaitre, origin_prometheus=ipcsecuatawsrpssl1, site=aws, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=ipcsec, env=uat, host=ipcsecuatawsrpssl1, installation=ipcsecuataws, instance=ipcsecuatawsrpssl1:1936, job=HAProxy, master=ipcsecuatawsproxymaitre, origin_prometheus=ipcsecuatawsrpssl1, site=aws, type=proxy} value=0 ]} {Instance:__name__=up, client=nbcn, env=prod, host=nbcnrpssl1, installation=nbcnprod, instance=nbcnrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=nbcnrpssl1, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=nbcn, env=prod, host=nbcnrpssl1, installation=nbcnprod, instance=nbcnrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=nbcnrpssl1, site=viger, type=proxy Value:0xc01df2e2e8} B:{Var:B Labels:__name__=up, client=nbcn, env=prod, host=nbcnrpssl1, installation=nbcnprod, instance=nbcnrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=nbcnrpssl1, site=viger, type=proxy Value:0xc01df2e390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165805847s EvaluationString:[ var='A' labels={__name__=up, client=nbcn, env=prod, host=nbcnrpssl1, installation=nbcnprod, instance=nbcnrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=nbcnrpssl1, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=nbcn, env=prod, host=nbcnrpssl1, installation=nbcnprod, instance=nbcnrpssl1:1936, job=HAProxy, master=clientapp5.croesus.com, origin_prometheus=nbcnrpssl1, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=nbcn, env=prod, host=nbcnrpssl2, installation=nbcnprod, 
instance=nbcnrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=nbcnrpssl2, site=viger, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=nbcn, env=prod, host=nbcnrpssl2, installation=nbcnprod, instance=nbcnrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=nbcnrpssl2, site=viger, type=proxy Value:0xc01df2e4e0} B:{Var:B Labels:__name__=up, client=nbcn, env=prod, host=nbcnrpssl2, installation=nbcnprod, instance=nbcnrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=nbcnrpssl2, site=viger, type=proxy Value:0xc01df2e568}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165812809s EvaluationString:[ var='A' labels={__name__=up, client=nbcn, env=prod, host=nbcnrpssl2, installation=nbcnprod, instance=nbcnrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=nbcnrpssl2, site=viger, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=nbcn, env=prod, host=nbcnrpssl2, installation=nbcnprod, instance=nbcnrpssl2:1936, job=HAProxy, master=clientapp7.croesus.com, origin_prometheus=nbcnrpssl2, site=viger, type=proxy} value=0 ]} {Instance:__name__=up, client=nbin, env=nonprod, host=nbinuatawsrpssl1, installation=nbinuataws, instance=nbinuatawsrpssl1:1936, job=HAProxy, master=nbinuatawsproxymaitre, origin_prometheus=nbinuatawsrpssl1, site=aws, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=nbin, env=nonprod, host=nbinuatawsrpssl1, installation=nbinuataws, instance=nbinuatawsrpssl1:1936, job=HAProxy, master=nbinuatawsproxymaitre, origin_prometheus=nbinuatawsrpssl1, site=aws, type=proxy Value:0xc01df2e758} B:{Var:B Labels:__name__=up, client=nbin, env=nonprod, host=nbinuatawsrpssl1, installation=nbinuataws, instance=nbinuatawsrpssl1:1936, job=HAProxy, master=nbinuatawsproxymaitre, origin_prometheus=nbinuatawsrpssl1, site=aws, type=proxy Value:0xc01df2e6b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165819795s EvaluationString:[ var='A' labels={__name__=up, client=nbin, env=nonprod, host=nbinuatawsrpssl1, installation=nbinuataws, instance=nbinuatawsrpssl1:1936, job=HAProxy, master=nbinuatawsproxymaitre, origin_prometheus=nbinuatawsrpssl1, site=aws, type=proxy} value=1 ], [ var='B' labels={__name__=up, client=nbin, env=nonprod, host=nbinuatawsrpssl1, installation=nbinuataws, instance=nbinuatawsrpssl1:1936, job=HAProxy, master=nbinuatawsproxymaitre, origin_prometheus=nbinuatawsrpssl1, site=aws, type=proxy} value=0 ]} {Instance:__name__=up, client=omg, env=prod, host=omgawsrpssl1, installation=omgaws, instance=omgawsrpssl1:1936, job=HAProxy, master=omgawsproxymaitre, origin_prometheus=omgawsrpssl1, site=aws, type=proxy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, client=omg, env=prod, host=omgawsrpssl1, installation=omgaws, instance=omgawsrpssl1:1936, job=HAProxy, master=omgawsproxymaitre, origin_prometheus=omgawsrpssl1, site=aws, type=proxy Value:0xc01df2e880} B:{Var:B Labels:__name__=up, client=omg, env=prod, host=omgawsrpssl1, installation=omgaws, instance=omgawsrpssl1:1936, job=HAProxy, master=omgawsproxymaitre, origin_prometheus=omgawsrpssl1, site=aws, type=proxy Value:0xc01df2e910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.165826391s EvaluationString:[ var='A' labels={__name__=up, client=omg, env=prod, host=omgawsrpssl1, installation=omgaws, instance=omgawsrpssl1:1936, job=HAProxy, master=omgawsproxymaitre, 
[... alert evaluation dump truncated. A single Grafana rule evaluation of `up{job="HAProxy"}` at 2024-05-29 13:44:10 UTC (evaluation duration ≈5.17 s) covering several dozen HAProxy proxy instances for the clients omg, rgmp, rjames, steadyhand, td, tddi, tdmf, tdpic, tll, vmbl, vmd, and wellaltus, plus the claretprod, iavmprod, and assorted unlabeled installations, at the aws and viger sites. Every instance reports State:Normal with A=1, B=0, except vmblUATrpssl1:1936 (client=vmbl, env=uat, installation=vmbluatv9, master=clientuatapp3.croesus.com, site=viger), which reports State:Alerting with A=0, B=1. ...]
Results:map[] Values:map[A:{Var:A Labels:__name__=up, instance=vmblrpssl1:1936, job=HAProxy, origin_prometheus=vmblrpssl1 Value:0xc01efa3d18} B:{Var:B Labels:__name__=up, instance=vmblrpssl1:1936, job=HAProxy, origin_prometheus=vmblrpssl1 Value:0xc01efa3d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.166234097s EvaluationString:[ var='A' labels={__name__=up, instance=vmblrpssl1:1936, job=HAProxy, origin_prometheus=vmblrpssl1} value=1 ], [ var='B' labels={__name__=up, instance=vmblrpssl1:1936, job=HAProxy, origin_prometheus=vmblrpssl1} value=0 ]} {Instance:__name__=up, instance=vmblrpssl2:1936, job=HAProxy, origin_prometheus=vmblrpssl2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, instance=vmblrpssl2:1936, job=HAProxy, origin_prometheus=vmblrpssl2 Value:0xc0191fe088} B:{Var:B Labels:__name__=up, instance=vmblrpssl2:1936, job=HAProxy, origin_prometheus=vmblrpssl2 Value:0xc0191fe0d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.166238107s EvaluationString:[ var='A' labels={__name__=up, instance=vmblrpssl2:1936, job=HAProxy, origin_prometheus=vmblrpssl2} value=1 ], [ var='B' labels={__name__=up, instance=vmblrpssl2:1936, job=HAProxy, origin_prometheus=vmblrpssl2} value=0 ]}]" duration=28.110685ms + level=debug ts=2024-05-29T13:44:15.173811801Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:15.173791829Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.17377826Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.173771569Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-ab2786b1-e50a-4d48-8788-dcdce91b0ef8" t=2024-05-29T13:44:15.173813129Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-ab2786b1-e50a-4d48-8788-dcdce91b0ef8" t=2024-05-29T13:44:15.173798608Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.173750611Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=102.129.145.7:9998, ip=102.129.145.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.173788008Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=374423 slug=bitburst version=1 fingerprint=51b454592b9d85ce attempt=1 now=2024-05-29T13:44:10Z 
t=2024-05-29T13:44:15.173657628Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.173336174s EvaluationString:}]" duration=28.594462ms + logger=ngalert.state.manager user=874970 slug=nvidia t=2024-05-29T13:44:15.173649843Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=184127 slug=dpgo t=2024-05-29T13:44:15.173683821Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.537556ms + logger=ngalert.scheduler user=874970 slug=nvidia version=15 fingerprint=0039a51f526f409a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.173548762Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.173143811s EvaluationString:}]" duration=17.461129ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.173686715Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:15.173530039Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.38819ms + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-9d14c0c5-a469-4f67-913c-8d28153ba592" t=2024-05-29T13:44:15.173543216Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=102.129.145.6:9998, ip=102.129.145.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.173545482Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=102.129.145.6:9998, ip=102.129.145.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.173532487Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(eadp.gos.torch.prod.nfs-2022-xbsx-bitc.Users_in_Game,5) Query" t=2024-05-29T13:44:15.173314612Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(eadp.gos.torch.prod.nfs-2022-xbsx-bitc.Users_in_Game,5) Query" t=2024-05-29T13:44:15.173303429Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.historian backend=loki user=893154 slug=cmselfnp t=2024-05-29T13:44:15.173227807Z level=debug msg="Alert state changed creating annotation" 
newState=Pending oldState=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.173288176Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=70430 slug=dapperlabs t=2024-05-29T13:44:15.173226435Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.11.52, job=ZoomRoom Baraboo" t=2024-05-29T13:44:15.173222937Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=893154 slug=cmselfnp t=2024-05-29T13:44:15.173133495Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=62.033374ms + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=33eafe8835894dd9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.173131756Z level=debug msg="Alert rule evaluated" results="[{Instance:name=keepLastValue(eadp.gos.torch.prod.nfs-2022-xbsx-bitc.Users_in_Game,5) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc057887fb0} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc057887fb8} Threshold:{Var:Threshold Labels: Value:0xc006d02010} compare:{Var:compare Labels:name=keepLastValue(eadp.gos.torch.prod.nfs-2022-xbsx-bitc.Users_in_Game,5) Query Value:0xc057887f80} sum:{Var:sum Labels:name=keepLastValue(eadp.gos.torch.prod.nfs-2022-xbsx-bitc.Users_in_Game,5) Query Value:0xc057887f98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.172981486s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=0 ], [ var='Threshold' labels={} value=-10 ], [ var='compare' labels={name=keepLastValue(eadp.gos.torch.prod.nfs-2022-xbsx-bitc.Users_in_Game,5) Query} value=0 ], [ var='sum' labels={name=keepLastValue(eadp.gos.torch.prod.nfs-2022-xbsx-bitc.Users_in_Game,5) Query} value=0 ]}]" duration=33.234356ms + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.11.51, job=ZoomRoom Alpine" t=2024-05-29T13:44:15.173103172Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=low, instance=172.16.11.51, job=ZoomRoom Alpine" t=2024-05-29T13:44:15.173089507Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=102.129.145.5:9998, ip=102.129.145.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.173090856Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-46fac94d-a0dc-457c-9989-0a9cee1322b2" t=2024-05-29T13:44:15.173036532Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=PIA, environment=production, instance=102.129.145.5:9998, ip=102.129.145.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-california.crt, role=vpn, server=losangeles413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.173073768Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-46fac94d-a0dc-457c-9989-0a9cee1322b2" t=2024-05-29T13:44:15.173014297Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.173012077Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=https://plantuml.inthepocket.org, job=PlantUML, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.172958969Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=https://plantuml.inthepocket.org, job=PlantUML, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.172920401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:15.172824316Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-2de64a80-eb1c-462e-8cb0-c8150055eefd" t=2024-05-29T13:44:15.172589868Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.18.40.11, job=paxton_antwerpen_server, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.172365035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.172189247Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.18.40.10, job=paxton_antwerpen_inkom, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.172182211Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=182434 slug=form t=2024-05-29T13:44:15.172195649Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=916139 slug=cmtdspd 
instance="persistentvolumeclaim=wcs9-tds-prod-git-postgr" t=2024-05-29T13:44:15.172096223Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.172128623Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=191103 slug=amazonadmin version=85 fingerprint=fad18c09a9337a83 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.172055285Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171766958s EvaluationString:}]" duration=148.275611ms + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.90.63, job=ap_entrance, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.172050599Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.90.63, job=ap_entrance, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.172032292Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=916139 slug=cmtdspd instance="persistentvolumeclaim=tdsprod-html-pvc" t=2024-05-29T13:44:15.171992043Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.171956288Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=916139 slug=cmtdspd instance="persistentvolumeclaim=tdsprod-cdap-data-pvc" t=2024-05-29T13:44:15.171926821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:15.171834789Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.171827148Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=916139 slug=cmtdspd instance="persistentvolumeclaim=tdsprod-auth-data-pvc" t=2024-05-29T13:44:15.171896101Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=sessionV2TestPersistent-2501b4ce-926a-4c33-bd37-d0553e479c8c" t=2024-05-29T13:44:15.171890175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.171822968Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.17152979Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=220750 slug=homeys instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.171768115Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=220750 slug=homeys 
instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.171752969Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.171820663Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=220750 slug=homeys instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.171738046Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=220750 slug=homeys instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.171720427Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.171766141Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.171668242Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.171780956Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.171736456Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.171757675Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=220750 slug=homeys instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.17169585Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.171666023Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.90.61, job=ap_team1, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.171718597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=220750 slug=homeys instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.171676807Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=220750 slug=homeys instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.171670172Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.171711237Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=679029 slug=joveoprodaws version=7504 fingerprint=9b1acbb90dcba653 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.171646611Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.1711843s EvaluationString:}]" duration=11.453424ms + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=9f2af9037a890c98 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.171652421Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171456592s EvaluationString:}]" duration=182.512522ms + logger=ngalert.state.manager user=220750 slug=homeys instance="name=homeys-env-prod.celery.queues.cpu.pending A" t=2024-05-29T13:44:15.171650382Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=gameSessionPersistentTest-FF76F56E438EBC3578FF758E8C30D21E" t=2024-05-29T13:44:15.171647113Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=gameSessionPersistentTest-FF76F56E438EBC3578FF758E8C30D21E" t=2024-05-29T13:44:15.171634831Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=220750 slug=homeys version=3 fingerprint=0911ebbbf3c98a58 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.171509466Z level=debug msg="Alert rule evaluated" results="[{Instance:name=homeys-env-prod.celery.queues.cpu.pending A State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:name=homeys-env-prod.celery.queues.cpu.pending A Value:} C:{Var:C Labels:name=homeys-env-prod.celery.queues.cpu.pending A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171047159s EvaluationString:[ var='B' labels={name=homeys-env-prod.celery.queues.cpu.pending A} value=null ], [ var='C' labels={name=homeys-env-prod.celery.queues.cpu.pending A} value=null ]} {Instance:name=homeys-env-prod.celery.queues.enedis_file.pending A State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:name=homeys-env-prod.celery.queues.enedis_file.pending A Value:} C:{Var:C Labels:name=homeys-env-prod.celery.queues.enedis_file.pending A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171058514s EvaluationString:[ var='B' labels={name=homeys-env-prod.celery.queues.enedis_file.pending A} value=null ], [ var='C' labels={name=homeys-env-prod.celery.queues.enedis_file.pending A} value=null ]} {Instance:name=homeys-env-prod.celery.queues.enedis_workflow.pending A State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:name=homeys-env-prod.celery.queues.enedis_workflow.pending A Value:} C:{Var:C Labels:name=homeys-env-prod.celery.queues.enedis_workflow.pending A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171064039s EvaluationString:[ var='B' labels={name=homeys-env-prod.celery.queues.enedis_workflow.pending A} value=null ], [ var='C' labels={name=homeys-env-prod.celery.queues.enedis_workflow.pending A} value=null ]} 
{Instance:name=homeys-env-prod.celery.queues.front.pending A State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:name=homeys-env-prod.celery.queues.front.pending A Value:} C:{Var:C Labels:name=homeys-env-prod.celery.queues.front.pending A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171068137s EvaluationString:[ var='B' labels={name=homeys-env-prod.celery.queues.front.pending A} value=null ], [ var='C' labels={name=homeys-env-prod.celery.queues.front.pending A} value=null ]} {Instance:name=homeys-env-prod.celery.queues.request.pending A State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:name=homeys-env-prod.celery.queues.request.pending A Value:} C:{Var:C Labels:name=homeys-env-prod.celery.queues.request.pending A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171073838s EvaluationString:[ var='B' labels={name=homeys-env-prod.celery.queues.request.pending A} value=null ], [ var='C' labels={name=homeys-env-prod.celery.queues.request.pending A} value=null ]} {Instance:name=homeys-env-prod.celery.queues.study.pending A State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:name=homeys-env-prod.celery.queues.study.pending A Value:} C:{Var:C Labels:name=homeys-env-prod.celery.queues.study.pending A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171078848s EvaluationString:[ var='B' labels={name=homeys-env-prod.celery.queues.study.pending A} value=null ], [ var='C' labels={name=homeys-env-prod.celery.queues.study.pending A} value=null ]} {Instance:name=homeys-env-prod.celery.queues.workflow.pending A State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:name=homeys-env-prod.celery.queues.workflow.pending A Value:} C:{Var:C Labels:name=homeys-env-prod.celery.queues.workflow.pending A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.171085197s EvaluationString:[ var='B' labels={name=homeys-env-prod.celery.queues.workflow.pending A} value=null ], [ var='C' labels={name=homeys-env-prod.celery.queues.workflow.pending A} value=null ]}]" duration=56.500817ms + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.90.60, job=ap_team2, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.171580382Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.171445892Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.171430398Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.90.50, job=nvr_leuven, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.171441216Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=gameSessionPersistentTest-F76BDC964299AD3913C0F1B98B903092" t=2024-05-29T13:44:15.171404875Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.171248725Z caller=remote_instance_store.go:51 user=912534 
slug=useat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.171241551Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.171239333Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=gameSessionPersistentTest-E9E4D7C14D69DF1A3656C2A4E6380588" t=2024-05-29T13:44:15.171109005Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Los Angeles, country=United States, datacenter=DataPacket, environment=production, instance=143.244.48.1:9998, ip=143.244.48.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=losangeles446, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.171098472Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.70.76, job=ZoomRoom Longhorn, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.170997809Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.170898306Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.50.62, job=ZoomRoom Sherpa, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.170808465Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.50.62, job=ZoomRoom Sherpa, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.170750782Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=802128 slug=unstatic t=2024-05-29T13:44:15.170607899Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=London, country=United Kingdom, datacenter=PIA, environment=production, instance=194.110.13.65:9998, ip=194.110.13.65, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=london434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.170665355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.170701646Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, 
__source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.50.61, job=ZoomRoom Memphis, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.170628838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=802128 slug=unstatic instance="datasource_uid=fddbjo7copeyoc, ref_id=A" t=2024-05-29T13:44:15.170590669Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.170653787Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.50.61, job=ZoomRoom Memphis, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.170611776Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=802128 slug=unstatic instance="datasource_uid=fddbjo7copeyoc, ref_id=A" t=2024-05-29T13:44:15.170570298Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=gameSessionPersistentTest-5356BDCD410E782FD946D8B5DED69131" t=2024-05-29T13:44:15.170538893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=154996 slug=veovo t=2024-05-29T13:44:15.170485648Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.325977ms + level=debug ts=2024-05-29T13:44:15.170379722Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.170353751Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.40.20, job=paxton_leuven_keuken, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.17032464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(apex.Australia.players.pc_steam.mh448980.serverstats) Query" t=2024-05-29T13:44:15.170324321Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.40.20, job=paxton_leuven_keuken, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.170299498Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.17022584Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.170256888Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=1c57877c86dc8bc3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.17016741Z 
level=debug msg="Alert rule evaluated" results="[{Instance:name=keepLastValue(apex.Australia.players.pc_steam.mh448980.serverstats) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc06ff5ebb0} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc06ff5ebb8} Threshold:{Var:Threshold Labels: Value:0xc06ff5ec00} compare:{Var:compare Labels:name=keepLastValue(apex.Australia.players.pc_steam.mh448980.serverstats) Query Value:0xc06ff5eb80} sum:{Var:sum Labels:name=keepLastValue(apex.Australia.players.pc_steam.mh448980.serverstats) Query Value:0xc06ff5eb98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.169891996s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=1000 ], [ var='Threshold' labels={} value=-20 ], [ var='compare' labels={name=keepLastValue(apex.Australia.players.pc_steam.mh448980.serverstats) Query} value=0 ], [ var='sum' labels={name=keepLastValue(apex.Australia.players.pc_steam.mh448980.serverstats) Query} value=0 ]}]" duration=26.365475ms + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.17.40.12, job=paxton_leuven_storage, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.169895313Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.169643973Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=562354 slug=exciscope t=2024-05-29T13:44:15.169508345Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.169351638Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.169389871Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=170464 slug=seattleflu t=2024-05-29T13:44:15.169105115Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:15.169216761Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=895137 slug=uid2 version=42 fingerprint=1728b4bd4e314fd8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.169167671Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels: Value:0xc03ba7b690} THRESHOLD:{Var:THRESHOLD Labels: Value:0xc03ba7b698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.168794804s EvaluationString:[ var='QUERY' labels={} value=NaN ], [ var='THRESHOLD' labels={} value=0 ]}]" duration=13.824634ms + logger=ngalert.state.manager user=711250 slug=lalamoninfo instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.169117257Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.169006277Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.16.91.66, job=ap_front, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.168806723Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=UETestPersistentFFA79" t=2024-05-29T13:44:15.16882281Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.168768221Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-periodic-reviews-db, env=pp" t=2024-05-29T13:44:15.168642943Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.168620569Z caller=remote_instance_store.go:51 user=815794 slug=fymtech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.16.91.65, job=ap_poly, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.168654303Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.168615394Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.16.91.64, job=ap_grasvlakte, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.168534738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=London, country=United Kingdom, datacenter=PIA, environment=production, instance=191.101.209.2:9998, ip=191.101.209.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-london.crt, role=vpn, server=london442, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.168559206Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.16.91.64, job=ap_grasvlakte, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.168519508Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=512940 slug=gruppoquattroits t=2024-05-29T13:44:15.168401314Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=512940 slug=gruppoquattroits instance="datasource_uid=nxIk2Y24k, ref_id=A" t=2024-05-29T13:44:15.168377278Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:15.168449936Z caller=remote_instance_store.go:51 user=512940 slug=gruppoquattroits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-instant-id-qa-db, env=pp" t=2024-05-29T13:44:15.168035276Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=266087 slug=avantarte t=2024-05-29T13:44:15.168265564Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=266087 slug=avantarte instance= t=2024-05-29T13:44:15.168244402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="environment_name=accelbyte-justice-development, game_interval_time=12, game_namespace=sdktest, game_template=UETestPersistentF26E8" 
t=2024-05-29T13:44:15.168281522Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=266087 slug=avantarte instance= t=2024-05-29T13:44:15.168231802Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=266087 slug=avantarte t=2024-05-29T13:44:15.168184633Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=266087 slug=avantarte version=2 fingerprint=319cf6da84576c52 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.168084995Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc03f8d7c18} C:{Var:C Labels: Value:0xc03f8d7c20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.167604344s EvaluationString:[ var='B' labels={} value=1.3461538461538463 ], [ var='C' labels={} value=0 ]}]" duration=65.687171ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.168107867Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.16.91.61, job=ap_bender, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.168115605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=901176 slug=sbceo t=2024-05-29T13:44:15.167593401Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.261596ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=London, country=United Kingdom, datacenter=PIA, environment=production, instance=181.215.176.65:9998, ip=181.215.176.65, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=london410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.167713291Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.16.91.58, job=ap_gent_keuken, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.1676527Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=373502 slug=stakeandrelax t=2024-05-29T13:44:15.167612658Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.015246ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.16765402Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=723897 slug=inthepocket instance="__name__=instance_job_severity:probe_success:mean5m, __source__metric__name__=Value, alert_sensitivity=low, id=bdi3qoe280iyoe, instance=172.16.91.57, job=ap_silentroom, name=instance_job_severity:probe_success:mean5m" t=2024-05-29T13:44:15.167523442Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=815794 slug=fymtech instance= t=2024-05-29T13:44:15.167423994Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:59:10Z next_ends_at=2024-05-29T14:04:10Z + level=debug ts=2024-05-29T13:44:15.167229557Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=903117 slug=essentialist instance= t=2024-05-29T13:44:15.167199967Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.167153462Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.167050958Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=London, country=United Kingdom, datacenter=PIA, environment=production, instance=181.215.176.2:9998, ip=181.215.176.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/uk-london.crt, role=vpn, server=london407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.166963387Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:15.166780537Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=London, country=United Kingdom, datacenter=PIA, environment=production, instance=181.215.176.2:9998, ip=181.215.176.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=london407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.16674608Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.1666957Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
+ [... several hundred added lines of raw Grafana ngalert debug log output elided: ngalert.state.manager state transitions ("Setting next state", "Keeping state"), ngalert.scheduler "Alert rule evaluated" results, ngalert.state.manager.persist "Saving alert states" entries, and remote_instance_store.go "calling SaveAlertInstance" calls, all timestamped 2024-05-29T13:44:15 across many tenants ...]
path=/root/certificates/us-lasvegas.crt, role=vpn, server=lasvegas421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.158235422Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155779601Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.158152364Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155725769Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155717751Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155688818Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155649155Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155641225Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155572452Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155521582Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155513695Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155493194Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=523906 slug=cyberark t=2024-05-29T13:44:15.158046002Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.826811ms + level=debug ts=2024-05-29T13:44:15.158060413Z caller=remote_instance_store.go:51 user=697672 slug=yrpc msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155408839Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" 
t=2024-05-29T13:44:15.155348958Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=697672 slug=yrpc t=2024-05-29T13:44:15.158015072Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155339803Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155295414Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.157920888Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=697672 slug=yrpc instance="env=prod, host=base-node0, instance=ymonitor:8090, job=ymonitor, monitor=example, network=base, provider=dyRPC, type=node" t=2024-05-29T13:44:15.157948711Z level=debug msg="Setting next state" handler=resultAlerting +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155267705Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155256814Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155215439Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155191402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155160867Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155088523Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-match-invoices-transactions.update, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.157788999Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.157764853Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155080358Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.157782762Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15504422Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.155023009Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154988555Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15498086Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154959533Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154948045Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.9:9998, ip=154.16.105.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=lasvegas420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.157632678Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154937721Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.157623712Z caller=remote_instance_store.go:51 user=500743 slug=sgr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-match-invoices-transactions.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.157625554Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.157605025Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.157565546Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=ANY, Resource=/service-fees/{proxy+}, Stage=$default" t=2024-05-29T13:44:15.157497658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154841967Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.157422797Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-match-invoices-invoiceMappings.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.157493829Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-frontend-log-db, env=pp" t=2024-05-29T13:44:15.157503035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15480946Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15478198Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154706182Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154694721Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154678494Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154650729Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.157294015Z caller=remote_instance_store.go:51 user=805026 slug=powwro11y msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154619739Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154601493Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.157317277Z caller=remote_instance_store.go:51 user=434892 slug=apexfsnzdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist 
user=434892 slug=apexfsnzdev t=2024-05-29T13:44:15.157270996Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=434892 slug=apexfsnzdev instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.157247389Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=434892 slug=apexfsnzdev t=2024-05-29T13:44:15.157222454Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154527133Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=316418 slug=workmotion version=3 fingerprint=637d1244646765a7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.157082081Z level=debug msg="Alert rule evaluated" results="[{Instance:ApiId=1mr10216z5, Method=ANY, Resource=/service-fees/{proxy+}, Stage=$default State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ApiId=1mr10216z5, Method=ANY, Resource=/service-fees/{proxy+}, Stage=$default Value:0xc087d3bff0} C:{Var:C Labels:ApiId=1mr10216z5, Method=ANY, Resource=/service-fees/{proxy+}, Stage=$default Value:0xc0108fa870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.156651107s EvaluationString:[ var='B' labels={ApiId=1mr10216z5, Method=ANY, Resource=/service-fees/{proxy+}, Stage=$default} value=17 ], [ var='C' labels={ApiId=1mr10216z5, Method=ANY, Resource=/service-fees/{proxy+}, Stage=$default} value=0 ]}]" duration=51.117181ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15450254Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.157242659Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154484117Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.7:9998, ip=154.16.105.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-lasvegas.crt, role=vpn, server=lasvegas418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.157139051Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.7:9998, ip=154.16.105.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-lasvegas.crt, role=vpn, server=lasvegas418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.157127663Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154389507Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154357032Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154347428Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.7:9998, ip=154.16.105.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=lasvegas418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.156972602Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154329098Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=436633 slug=swirldslabsproduction t=2024-05-29T13:44:15.156906561Z level=debug msg="Saving alert states" count=19 max_state_save_concurrency=1 + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.85.179.35, job=uploader-stats, metrics_node_id=3, node_id=2" t=2024-05-29T13:44:15.156892303Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.156924308Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154295252Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.156897976Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15424434Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.156833112Z caller=remote_instance_store.go:51 user=228733 slug=csmoney msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154147576Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154136354Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.94.100.41, job=uploader-stats, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:15.156790388Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154125776Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.156696514Z caller=remote_instance_store.go:51 user=375798 slug=beeworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.156636918Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.156695014Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.156702742Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 34.125.125.246, job=uploader-stats, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:15.156715777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 34.125.125.246, job=uploader-stats, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:15.156704031Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.154033191Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node05 - 34.106.79.186, job=uploader-stats, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:15.156635114Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node05 - 34.106.79.186, job=uploader-stats, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:15.156620618Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=658231 slug=koomie instance="instance=10.0.100.101:9100, job=logins, monitor=hpcfund" t=2024-05-29T13:44:15.156613587Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153994622Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153988758Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=658231 slug=koomie instance="instance=10.0.100.101:9100, job=logins, monitor=hpcfund" t=2024-05-29T13:44:15.156591958Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153970872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=658231 slug=koomie t=2024-05-29T13:44:15.156545776Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node04 - 35.236.3.57, job=uploader-stats, metrics_node_id=5, node_id=4" t=2024-05-29T13:44:15.156542886Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153966379Z level=debug 
msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.6:9998, ip=154.16.105.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=lasvegas417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.156548069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153949149Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.156513987Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153912991Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.156425536Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.156438116Z caller=remote_alert_sender.go:94 user=767797 slug=mgmresorts host=mgmresorts-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.132.178:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edmlyt6xh05c0d alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.5:9998, ip=154.16.105.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-lasvegas.crt, role=vpn, server=lasvegas416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.156416613Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.156378306Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.66.191.231, job=uploader-stats, metrics_node_id=3, node_id=2" t=2024-05-29T13:44:15.156349079Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15380869Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.151057169Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153802813Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153781781Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.156182965Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153734173Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 35.245.55.182, job=uploader-stats, metrics_node_id=2, node_id=1" t=2024-05-29T13:44:15.156216254Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15370462Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.74.62.75, job=uploader-stats, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:15.156136728Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153666886Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-fraud-detection-flags-paymentCards.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.156099906Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153643725Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" 
t=2024-05-29T13:44:15.153639359Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153632509Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153608624Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.155950741Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15360199Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153553974Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-fraud-detection-flags-companiesMetadata.update, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.155956826Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153531457Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15347201Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.4:9998, ip=154.16.105.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=lasvegas415, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.155861372Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.155824955Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-fraud-detection-alerts-receipts.update, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.155859729Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153390341Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15336862Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153361059Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node27 - 85.195.72.173, job=uploader-stats, metrics_node_id=28, node_id=27" t=2024-05-29T13:44:15.155809934Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153338175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-fraud-detection-alerts-receipts.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.155753639Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node23 - 216.18.218.2, job=uploader-stats, metrics_node_id=24, node_id=23" t=2024-05-29T13:44:15.155759444Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-frontend-database, env=pp" t=2024-05-29T13:44:15.15570075Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.3:9998, ip=154.16.105.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-lasvegas.crt, role=vpn, server=lasvegas412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.15570267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node08 - 130.76.212.141, job=uploader-stats, metrics_node_id=9, node_id=8" t=2024-05-29T13:44:15.155668423Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr 
instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.1532567Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.155675728Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node08 - 130.76.212.141, job=uploader-stats, metrics_node_id=9, node_id=8" t=2024-05-29T13:44:15.155656229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153158397Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153150607Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node07 - 175.110.113.85, job=uploader-stats, metrics_node_id=8, node_id=7" t=2024-05-29T13:44:15.155569868Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.3:9998, ip=154.16.105.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=lasvegas412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.155552578Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-elasticsearch-stream-usersProfiles.update, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.155550706Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153092291Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=PIA, environment=production, instance=154.16.105.3:9998, ip=154.16.105.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=lasvegas412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.155544448Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.155459799Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.155361232Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153047263Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.153034968Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 31.214.8.132, job=uploader-stats, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:15.155477252Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.155405212Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152995786Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152986258Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152976919Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152889796Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=LwbzRMnVk, ref_id=A" t=2024-05-29T13:44:15.155303915Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 34.16.1.104, job=uploader-stats, metrics_node_id=2, node_id=1" t=2024-05-29T13:44:15.155352081Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.1552835Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.155351356Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15285886Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=430961 slug=solifi version=7 fingerprint=0610c79d057995bd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.15522367Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=LwbzRMnVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.154872231s EvaluationString:}]" duration=31.326846ms + logger=ngalert.scheduler user=436633 slug=swirldslabsproduction version=16 fingerprint=baf2a0e092382104 attempt=1 now=2024-05-29T13:44:10Z 
t=2024-05-29T13:44:15.154920794Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 34.16.1.104, job=uploader-stats, metrics_node_id=2, node_id=1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 34.16.1.104, job=uploader-stats, metrics_node_id=2, node_id=1 Value:0xc03b88fb80} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 34.16.1.104, job=uploader-stats, metrics_node_id=2, node_id=1 Value:0xc03b88fad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153360933s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 34.16.1.104, job=uploader-stats, metrics_node_id=2, node_id=1} value=0 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=mainnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 34.16.1.104, job=uploader-stats, metrics_node_id=2, node_id=1} value=0 ]}
[... analogous {Instance: ... State:Normal ...} result blocks for mainnet nodes node02 (var='A' value=66), node06 (0), node07 (50), node08 (51), node23 (58), node27 (50), node28 (49), node30 (57), and node31 (0) omitted; the same entry continues with the previewnet nodes: ...]
{Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.74.62.75, job=uploader-stats, metrics_node_id=1, node_id=0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.74.62.75, job=uploader-stats, metrics_node_id=1, node_id=0 Value:0xc01b2641d0} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count,
environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.74.62.75, job=uploader-stats, metrics_node_id=1, node_id=0 Value:0xc01b264120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153476286s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.74.62.75, job=uploader-stats, metrics_node_id=1, node_id=0} value=45 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.74.62.75, job=uploader-stats, metrics_node_id=1, node_id=0} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 35.245.55.182, job=uploader-stats, metrics_node_id=2, node_id=1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 35.245.55.182, job=uploader-stats, metrics_node_id=2, node_id=1 Value:0xc01b264908} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 35.245.55.182, job=uploader-stats, metrics_node_id=2, node_id=1 Value:0xc01b264aa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153484146s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 35.245.55.182, job=uploader-stats, metrics_node_id=2, node_id=1} value=43 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node01 - 35.245.55.182, job=uploader-stats, metrics_node_id=2, node_id=1} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.66.191.231, job=uploader-stats, metrics_node_id=3, node_id=2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.66.191.231, job=uploader-stats, metrics_node_id=3, node_id=2 Value:0xc01b264f10} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.66.191.231, job=uploader-stats, metrics_node_id=3, node_id=2 Value:0xc01b264fb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153491645s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.66.191.231, job=uploader-stats, metrics_node_id=3, node_id=2} value=53 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.66.191.231, job=uploader-stats, metrics_node_id=3, node_id=2} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, 
environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node03 - 34.83.34.239, job=uploader-stats, metrics_node_id=4, node_id=3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node03 - 34.83.34.239, job=uploader-stats, metrics_node_id=4, node_id=3 Value:0xc01b2656d0} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node03 - 34.83.34.239, job=uploader-stats, metrics_node_id=4, node_id=3 Value:0xc01b265820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153504375s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node03 - 34.83.34.239, job=uploader-stats, metrics_node_id=4, node_id=3} value=45 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node03 - 34.83.34.239, job=uploader-stats, metrics_node_id=4, node_id=3} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node04 - 35.236.3.57, job=uploader-stats, metrics_node_id=5, node_id=4 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node04 - 35.236.3.57, job=uploader-stats, metrics_node_id=5, node_id=4 Value:0xc01b265ac8} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node04 - 35.236.3.57, job=uploader-stats, metrics_node_id=5, node_id=4 Value:0xc01b265980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153512377s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node04 - 35.236.3.57, job=uploader-stats, metrics_node_id=5, node_id=4} value=45 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node04 - 35.236.3.57, job=uploader-stats, metrics_node_id=5, node_id=4} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node05 - 34.106.79.186, job=uploader-stats, metrics_node_id=6, node_id=5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node05 - 34.106.79.186, job=uploader-stats, metrics_node_id=6, node_id=5 Value:0xc01b265e40} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node05 - 34.106.79.186, job=uploader-stats, metrics_node_id=6, node_id=5 Value:0xc01b265fb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153520254s EvaluationString:[ var='A' 
labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node05 - 34.106.79.186, job=uploader-stats, metrics_node_id=6, node_id=5} value=51 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node05 - 34.106.79.186, job=uploader-stats, metrics_node_id=6, node_id=5} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 34.125.125.246, job=uploader-stats, metrics_node_id=7, node_id=6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 34.125.125.246, job=uploader-stats, metrics_node_id=7, node_id=6 Value:0xc02c7a6220} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 34.125.125.246, job=uploader-stats, metrics_node_id=7, node_id=6 Value:0xc02c7a62c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153529636s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 34.125.125.246, job=uploader-stats, metrics_node_id=7, node_id=6} value=53 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=previewnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node06 - 34.125.125.246, job=uploader-stats, metrics_node_id=7, node_id=6} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.94.100.41, job=uploader-stats, metrics_node_id=1, node_id=0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.94.100.41, job=uploader-stats, metrics_node_id=1, node_id=0 Value:0xc02c7a6520} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.94.100.41, job=uploader-stats, metrics_node_id=1, node_id=0 Value:0xc02c7a65e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.15353829s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.94.100.41, job=uploader-stats, metrics_node_id=1, node_id=0} value=45 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node00 - 34.94.100.41, job=uploader-stats, metrics_node_id=1, node_id=0} value=0 ]} {Instance:__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.85.179.35, job=uploader-stats, metrics_node_id=3, node_id=2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, 
instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.85.179.35, job=uploader-stats, metrics_node_id=3, node_id=2 Value:0xc02c7a6778} C:{Var:C Labels:__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.85.179.35, job=uploader-stats, metrics_node_id=3, node_id=2 Value:0xc02c7a6888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153545572s EvaluationString:[ var='A' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.85.179.35, job=uploader-stats, metrics_node_id=3, node_id=2} value=49 ], [ var='C' labels={__name__=uploader_eventsstream_files_on_disk_count, environment=testnet, instance=localhost:9090, instance_type=hedera-node, inventory_name=node02 - 34.85.179.35, job=uploader-stats, metrics_node_id=3, node_id=2} value=0 ]}]" duration=13.881425ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152785428Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152765813Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152745106Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=DataPacket, environment=production, instance=173.239.226.190:9998, ip=173.239.226.190, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=lasvegas427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.155194383Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152725658Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152703739Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152696092Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152663704Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-elasticsearch-stream-transactionPayinTransfer.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.155090063Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr 
instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152646149Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152605691Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=DataPacket, environment=production, instance=173.239.226.129:9998, ip=173.239.226.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-lasvegas.crt, role=vpn, server=lasvegas426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.154976353Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152558376Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152537443Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-frontend-database, env=pp" t=2024-05-29T13:44:15.154953953Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152508387Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.154952284Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152489716Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152478947Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.154897495Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.154935951Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.154887317Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.154870661Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152405494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance="Instance=--" t=2024-05-29T13:44:15.15482795Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager 
user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-elasticsearch-stream-invoiceMappings.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.15482902Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152357814Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152331924Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152320959Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152303457Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152279262Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15224012Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-elasticsearch-stream-companiesProfiles.update, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.154692234Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152210528Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152180942Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152102465Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152073404Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=DataPacket, environment=production, instance=143.244.48.188:9998, ip=143.244.48.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-lasvegas.crt, role=vpn, server=losangeles408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.154536198Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 
slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152044198Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=784151 slug=bmsbv t=2024-05-29T13:44:15.154484817Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152036086Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Las Vegas, country=United States, datacenter=DataPacket, environment=production, instance=143.244.48.188:9998, ip=143.244.48.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-lasvegas.crt, role=vpn, server=losangeles408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.15452641Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.152015035Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=784151 slug=bmsbv instance= t=2024-05-29T13:44:15.154452767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15198809Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151976815Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-elasticsearch-stream-bankAccounts.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.154376718Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151915335Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-api-staging, resource.label.subscription_id=dataflow-elasticsearch-stream-bankAccounts.create, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.154358777Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151860898Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151849331Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151809449Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager.persist user=563718 slug=exertus t=2024-05-29T13:44:15.154240653Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=83.028197ms + level=debug ts=2024-05-29T13:44:15.154197993Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=901176 slug=sbceo t=2024-05-29T13:44:15.154206073Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151724097Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151709921Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151694575Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151675914Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=154996 slug=veovo instance= t=2024-05-29T13:44:15.154134776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151635954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151625034Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151619014Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151583134Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15152532Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=La Paz, country=Bolivia, datacenter=GSL, environment=production, instance=84.247.91.2:9998, ip=84.247.91.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/bo-bolivia-pf.crt, role=vpn, server=bolivia401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.154029084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151496137Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" 
t=2024-05-29T13:44:15.151478011Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151466424Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151461254Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151450503Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151400449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151369162Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151357935Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15135179Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kuala Lumpur, country=Malaysia, datacenter=M247, environment=production, instance=146.70.15.34:9998, ip=146.70.15.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=malaysia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.153829316Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15134135Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.1513123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151295396Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151268151Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.153601984Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15124096Z level=debug msg="Setting next state" 
handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kuala Lumpur, country=Malaysia, datacenter=M247, environment=production, instance=146.70.15.34:9998, ip=146.70.15.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/kualalumpur.crt, role=vpn, server=malaysia402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.15361768Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:15.153565038Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151192396Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151181993Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151164013Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151158842Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-creditsafe-db, env=pp" t=2024-05-29T13:44:15.153429707Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151154681Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=426229 slug=accelbyte version=272 fingerprint=12a6b4f13520f4a4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.153448665Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.153072641s EvaluationString:}]" duration=67.717055ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151149908Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151144179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kuala Lumpur, country=Malaysia, datacenter=M247, environment=production, instance=146.70.15.17:9998, ip=146.70.15.17, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=malaysia401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.15343899Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, 
city=Kuala Lumpur, country=Malaysia, datacenter=M247, environment=production, instance=146.70.15.17:9998, ip=146.70.15.17, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=malaysia401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.153428759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151125611Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151120453Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.15335279Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151057135Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151046786Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.153299389Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151016206Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info ts=2024-05-29T13:44:15.153104223Z caller=remote_image_capturer.go:61 user=184127 slug=dpgo rule_org_id=1 rule_uid=e0a8cf2a-eaad-4a61-ba22-f0bdd4793a43 dashboard=p9lQ2irnk panel=28 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.151003162Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.153159259Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150964962Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15093073Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150923282Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150901792Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150869872Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15086378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150859206Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.153057145Z caller=remote_instance_store.go:51 user=418250 slug=hazelcastcloud msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150854736Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150811811Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.152983465Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150776393Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.152972541Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kiev, country=Ukraine, datacenter=DataPacket, environment=production, instance=84.239.42.31:9998, ip=84.239.42.31, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ua.crt, role=vpn, server=kiev407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.152985128Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150753681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr 
instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150745356Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.152930488Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150733133Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.152939752Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=430961 slug=solifi version=2 fingerprint=474e4c35497601ae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.152866723Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.152614911s EvaluationString:}]" duration=131.433935ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150707099Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150689439Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150682116Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150664868Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kiev, country=Ukraine, datacenter=DataPacket, environment=production, instance=84.239.42.31:9998, ip=84.239.42.31, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=kiev407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.152815749Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150626103Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:15.152783535Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=info ts=2024-05-29T13:44:15.152730479Z caller=remote_image_capturer.go:61 user=190917 slug=d1cx rule_org_id=1 rule_uid=fbef0388-8310-4277-ac36-ee5a115b8a55 dashboard=WJw5WQL4z panel=25 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.152758684Z level=warn msg="Failed to take an image" dashboard=WJw5WQL4z panel=25 error="rpc error: code = 
Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=418250 slug=hazelcastcloud t=2024-05-29T13:44:15.152762585Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15057618Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15056908Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.15055718Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=823624 slug=falconxprod t=2024-05-29T13:44:15.152674758Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=823624 slug=falconxprod version=9 fingerprint=72eb1a0600162c93 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.152583547Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.15210699s EvaluationString:}]" duration=24.460482ms + level=debug ts=2024-05-29T13:44:15.152236749Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150487719Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:15.152642327Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=267050 slug=pavilionpay t=2024-05-29T13:44:15.152645442Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150469617Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kiev, country=Ukraine, datacenter=DataPacket, environment=production, instance=84.239.42.1:9998, ip=84.239.42.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ua.crt, role=vpn, server=kiev408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.15263013Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.152574987Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150402034Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150364615Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.scheduler user=267050 slug=pavilionpay version=1 fingerprint=81f6bf62c6801d55 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.1523905Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=NewYork State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=NewYork Value:0xc0037bb6f8} C:{Var:C Labels:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=NewYork Value:0xc0037bb750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.151924172s EvaluationString:[ var='A' labels={__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=NewYork} value=200 ], [ var='C' labels={__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=NewYork} value=0 ]} {Instance:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=SanFrancisco State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=SanFrancisco Value:0xc0037bb808} C:{Var:C Labels:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=SanFrancisco Value:0xc0037bb848}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.151939942s EvaluationString:[ var='A' labels={__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=SanFrancisco} value=200 ], [ var='C' labels={__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=SanFrancisco} value=0 ]} {Instance:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=Toronto State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=Toronto Value:0xc0037bb950} C:{Var:C Labels:__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=Toronto Value:0xc0037bb8f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.151951233s EvaluationString:[ var='A' labels={__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=Toronto} value=200 ], [ var='C' labels={__name__=probe_http_status_code, config_version=1713407166824360448, instance=https://vip.gplightspeed.com/healthcheck, job=Prod VIP Health, probe=Toronto} value=0 ]}]" duration=9.77656ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, 
brand=PIA, city=Kiev, country=Ukraine, datacenter=DataPacket, environment=production, instance=84.239.42.1:9998, ip=84.239.42.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=kiev408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.152478816Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150311268Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150303671Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150282512Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150270855Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150260693Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.152427458Z caller=remote_instance_store.go:51 user=474037 slug=renecmon msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.152332244Z caller=remote_instance_store.go:51 user=672418 slug=streamkap msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.152360559Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.977076ms + level=debug ts=2024-05-29T13:44:15.152357384Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150221956Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=474037 slug=renecmon instance="device=/dev/md1, fstype=ext4, instance=localhost:9100, job=RPC_4, mountpoint=/home/ubuntu/renec-cluster" t=2024-05-29T13:44:15.152367884Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=474037 slug=renecmon instance="device=/dev/md1, fstype=ext4, instance=localhost:9100, job=RPC_4, mountpoint=/home/ubuntu/renec-cluster" t=2024-05-29T13:44:15.152351727Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150193655Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=474037 slug=renecmon t=2024-05-29T13:44:15.152300939Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.152205001Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.152257742Z 
caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kathmandu, country=Nepal, datacenter=GSL, environment=production, instance=84.247.97.2:9998, ip=84.247.97.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=kathmandu401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.152283826Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150132899Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150124282Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.152257217Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150114372Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150103574Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150079973Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150043552Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150023494Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150013504Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.150005103Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.152150356Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149993416Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149962583Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149949158Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149928777Z level=debug msg="Setting next state" handler=resultNoData + level=info ts=2024-05-29T13:44:15.15196767Z caller=remote_image_capturer.go:61 user=190917 slug=d1cx rule_org_id=1 rule_uid=fbef0388-8310-4277-ac36-ee5a115b8a55 dashboard=WJw5WQL4z panel=25 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149879413Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149871591Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.152002128Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149849518Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149839Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149828381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149799614Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.151770687Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.060618ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Kansas City, country=United States, datacenter=DataPacket, environment=production, instance=84.239.16.129:9998, ip=84.239.16.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=missouri402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.151747164Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149646508Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.151704368Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.510845ms + logger=ngalert.state.manager.persist user=381888 slug=devprintcart t=2024-05-29T13:44:15.151681885Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.23344ms +} logger=ngalert.state.manager user=500743 slug=sgr 
instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149605533Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149576773Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.151552211Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149473214Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.15152478Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149422393Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Johannesburg, country=South Africa, datacenter=DataPacket, environment=production, instance=154.47.30.31:9998, ip=154.47.30.31, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/za.crt, role=vpn, server=johannesburg411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.151567763Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.15154726Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.15142034Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14935886Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=852841 slug=agrivolt instance= t=2024-05-29T13:44:15.151385247Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.151459719Z caller=remote_instance_store.go:51 user=852841 slug=agrivolt msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149336818Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149296675Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149285993Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=852841 slug=agrivolt version=72 fingerprint=3f8e3c917675aa60 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.151241383Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc00ab57760} B:{Var:B Labels: Value:0xc00ab57768}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.150756106s EvaluationString:[ var='A' labels={} value=1.241 ], [ var='B' labels={} value=0 ]}]" duration=35.292119ms +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14926171Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149255031Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.151243052Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149216011Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149164368Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149158282Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.151171946Z caller=remote_image_capturer.go:54 user=190917 slug=d1cx rule_org_id=1 rule_uid=fbef0388-8310-4277-ac36-ee5a115b8a55 dashboard=WJw5WQL4z panel=25 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=500743 
slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149147501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14913644Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149130154Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14912042Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149109195Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=772239 slug=seamapi instance="__name__=node_load15, job=integrations/metrics_endpoint/1252379-metrics-endpoint-seam_connect_postgres_host, scrape_job=seam_connect_postgres_host" t=2024-05-29T13:44:15.151069311Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=772239 slug=seamapi t=2024-05-29T13:44:15.15098803Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149048885Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.149038071Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:15.150958635Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug component=discovery ts=2024-05-29T13:44:15.150945989Z caller=retry.go:58 user=328744 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=4 +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148998134Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=438185 slug=nodeinfra version=20 fingerprint=42a4a3ef15540f4f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.150768964Z level=debug msg="Alert rule evaluated" results="[{Instance:chain=HVH, channel=fca3fc, cloud=gcp, deployment=production, hostname=7cabdaf7, instance=104.198.0.186:9000, job=prod-HVH-mainnet-validator, network=mainnet, node_name=prod_HVH_mainnet_validator_org_1, region=oregon, servicetype=validator State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=HVH, channel=fca3fc, cloud=gcp, deployment=production, hostname=7cabdaf7, instance=104.198.0.186:9000, job=prod-HVH-mainnet-validator, network=mainnet, node_name=prod_HVH_mainnet_validator_org_1, region=oregon, servicetype=validator Value:0xc00ebdf9f8} B:{Var:B Labels:chain=HVH, channel=fca3fc, cloud=gcp, deployment=production, hostname=7cabdaf7, instance=104.198.0.186:9000, job=prod-HVH-mainnet-validator, network=mainnet, node_name=prod_HVH_mainnet_validator_org_1, region=oregon, servicetype=validator 
Value:0xc00ebdfaa0} C:{Var:C Labels:chain=HVH, channel=fca3fc, cloud=gcp, deployment=production, hostname=7cabdaf7, instance=104.198.0.186:9000, job=prod-HVH-mainnet-validator, network=mainnet, node_name=prod_HVH_mainnet_validator_org_1, region=oregon, servicetype=validator Value:0xc00ebdfb50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.150364458s EvaluationString:[ var='A' labels={chain=HVH, channel=fca3fc, cloud=gcp, deployment=production, hostname=7cabdaf7, instance=104.198.0.186:9000, job=prod-HVH-mainnet-validator, network=mainnet, node_name=prod_HVH_mainnet_validator_org_1, region=oregon, servicetype=validator} value=149.4736842105263 ], [ var='B' labels={chain=HVH, channel=fca3fc, cloud=gcp, deployment=production, hostname=7cabdaf7, instance=104.198.0.186:9000, job=prod-HVH-mainnet-validator, network=mainnet, node_name=prod_HVH_mainnet_validator_org_1, region=oregon, servicetype=validator} value=149.4736842105263 ], [ var='C' labels={chain=HVH, channel=fca3fc, cloud=gcp, deployment=production, hostname=7cabdaf7, instance=104.198.0.186:9000, job=prod-HVH-mainnet-validator, network=mainnet, node_name=prod_HVH_mainnet_validator_org_1, region=oregon, servicetype=validator} value=0 ]}]" duration=41.246029ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148966083Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Jerusalem, country=Israel, datacenter=DataPacket, environment=production, instance=149.88.26.129:9998, ip=149.88.26.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=jerusalem414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.150910198Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=337951 slug=pawapay t=2024-05-29T13:44:15.150785752Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.581379ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148942888Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=554711 slug=bekci instance= t=2024-05-29T13:44:15.150842084Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.150859034Z caller=remote_instance_store.go:51 user=829340 slug=unfnboprod msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148933697Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148922739Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.150861886Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148914634Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148908709Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148900386Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Jerusalem, country=Israel, datacenter=DataPacket, environment=production, instance=149.88.26.129:9998, ip=149.88.26.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/israel.crt, role=vpn, server=jerusalem414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.150744622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Jerusalem, country=Israel, datacenter=DataPacket, environment=production, instance=149.88.26.129:9998, ip=149.88.26.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/israel.crt, role=vpn, server=jerusalem414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.150728689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148822323Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148816501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148803533Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148784747Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.150629571Z caller=remote_instance_store.go:51 user=373502 slug=stakeandrelax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148742499Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.150542191Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148733135Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148727211Z level=debug msg="Execution no data state is Normal" 
handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148722073Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Jakarta, country=Indonesia, datacenter=DataPacket, environment=production, instance=84.17.39.187:9998, ip=84.17.39.187, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=indonesia403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.150525169Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.150454969Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148652706Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148641317Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.150336394Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148603196Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:15.150263497Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.150310065Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148582511Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148577761Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=543654 slug=jobcloudprogrammaticprod version=1 fingerprint=b62409932f95b008 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.150098414Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.141786059s EvaluationString:}]" duration=18.253939ms + level=debug ts=2024-05-29T13:44:15.150253594Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.150241203Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.149970244Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.150151637Z caller=remote_instance_store.go:51 user=524410 slug=syso msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148478053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148449548Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148443089Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148432459Z level=debug msg="Setting next state" handler=resultNoData + level=info ts=2024-05-29T13:44:15.150107418Z caller=remote_alert_sender.go:94 user=21051 slug=mojio host=mojio-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.80.247:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e0b58d8d-5030-4561-9b50-6bfac5178f36 alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Jackson, country=United States, datacenter=DataPacket, environment=production, instance=84.239.31.129:9998, ip=84.239.31.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-mississippi-pf.crt, role=vpn, server=mississippi402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.150036046Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148378363Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148358607Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.149979591Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148319847Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148271644Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148253162Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148221974Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148212151Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148180736Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.14975313Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148166012Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148158253Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.149765288Z caller=remote_instance_store.go:51 user=810932 slug=perdoctus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Jackson, country=United States, datacenter=DataPacket, environment=production, instance=84.239.31.129:9998, ip=84.239.31.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mississippi402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.149809894Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148151148Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.149811723Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148138133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Jackson, country=United States, datacenter=DataPacket, environment=production, instance=84.239.31.129:9998, ip=84.239.31.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=mississippi402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.149793326Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148078494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.148033305Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147996702Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=307649 slug=canaryspeech version=64 
fingerprint=3a2214729af90824 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.14958399Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.149251636s EvaluationString:}]" duration=31.037459ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147975621Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147965348Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=188.213.34.98:9998, ip=188.213.34.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/tr.crt, role=streaming-optimized, server=istanbul403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.149622595Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.14950221Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147920958Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:15.149530387Z caller=remote_alert_sender.go:94 user=99366 slug=artery host=artery-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.26.227:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=C4Pds3d7k alerts=1 +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147887824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147855085Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147835764Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147810715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14780159Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147790208Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:15.149406415Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.12701ms + logger=ngalert.state.manager user=500743 slug=sgr 
instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147771962Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=188.213.34.98:9998, ip=188.213.34.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=istanbul403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.149420982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147743592Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.149337226Z caller=remote_instance_store.go:51 user=770817 slug=exproment msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147670255Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14766219Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14765033Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147621418Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.148917237Z caller=remote_instance_store.go:51 user=417450 slug=legitsecurity msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147556994Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147532662Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147520797Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.148955173Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.148891508Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147439076Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=463354 slug=bridgeops instance= t=2024-05-29T13:44:15.148858015Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug 
ts=2024-05-29T13:44:15.148841098Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147424456Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=188.213.34.66:9998, ip=188.213.34.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/tr.crt, role=streaming-optimized, server=istanbul401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.148824973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147407824Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=417450 slug=legitsecurity instance="account_id=470877518723, container=yace, dimension_DatabaseClass=db.r5d.2xlarge, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=global, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.148662673Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=542894 slug=aize t=2024-05-29T13:44:15.148758535Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=417450 slug=legitsecurity instance="account_id=470877518723, container=yace, dimension_DatabaseClass=db.m5d.large, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=global, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.148632802Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=417450 slug=legitsecurity instance="account_id=470877518723, container=yace, dimension_DatabaseClass=db.m5d.large, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=global, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.148622636Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147391766Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=417450 slug=legitsecurity instance="account_id=470877518723, container=yace, dimension_DBInstanceIdentifier=read-replica-legit-postgres-dm-db, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=arn:aws:rds:us-east-1:470877518723:db:read-replica-legit-postgres-dm-db, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.148579969Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.148742244Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=417450 slug=legitsecurity 
instance="account_id=470877518723, container=yace, dimension_DBInstanceIdentifier=legit-postgres-issues-db, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=arn:aws:rds:us-east-1:470877518723:db:legit-postgres-issues-db, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.14855232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=417450 slug=legitsecurity instance="account_id=470877518723, container=yace, dimension_DBInstanceIdentifier=legit-postgres-dm-db, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=arn:aws:rds:us-east-1:470877518723:db:legit-postgres-dm-db, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.148517323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=417450 slug=legitsecurity instance="account_id=470877518723, container=yace, dimension_DBInstanceIdentifier=legit-postgres-db, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=arn:aws:rds:us-east-1:470877518723:db:legit-postgres-db, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.148465876Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=417450 slug=legitsecurity instance="account_id=470877518723, container=yace, dimension_DBInstanceIdentifier=avivc2c, endpoint=5000, instance=10.2.0.175:5000, job=yace, name=arn:aws:rds:us-east-1:470877518723:db:avivc2c, namespace=monitoring, pod=yace-67dbf6cd9c-7pwwk, prometheus=monitoring/metrics-kube-prometheus-st-prometheus, region=us-east-1, service=yace" t=2024-05-29T13:44:15.148411417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147339197Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147317983Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147305849Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=417450 slug=legitsecurity t=2024-05-29T13:44:15.14835762Z level=debug msg="State manager processing evaluation results" resultCount=9 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=188.213.34.18:9998, ip=188.213.34.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/tr.crt, role=streaming-optimized, server=istanbul405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.148453494Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147218417Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.148398465Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147199522Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147162538Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.1471256Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147114771Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147109203Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147095499Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.148224519Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147065249Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147044177Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147038476Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=652809 slug=glassnode t=2024-05-29T13:44:15.148193826Z level=debug msg="Saving alert states" count=19 max_state_save_concurrency=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147020182Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-store-gateway-2" t=2024-05-29T13:44:15.148135937Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.147009321Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146987751Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146978561Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-store-gateway-0" t=2024-05-29T13:44:15.148073574Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=188.213.34.138:9998, ip=188.213.34.138, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/tr.crt, role=streaming-optimized, server=istanbul404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.148030076Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146935317Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146901523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-query-frontend-787b57f9c6-fmrxx" t=2024-05-29T13:44:15.14785353Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.14782225Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14689692Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146890105Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146874215Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146868612Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146863788Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14685903Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-querier-66dc459d7-ztkkr" t=2024-05-29T13:44:15.147790767Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146853579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146848885Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146830455Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-querier-66dc459d7-c9snp" t=2024-05-29T13:44:15.147714716Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146809375Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.147644078Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146792276Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146779069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-overrides-exporter-75bd99d79d-pb7wb" t=2024-05-29T13:44:15.147583615Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146745283Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146734967Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-ingester-2" t=2024-05-29T13:44:15.147534701Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Indianapolis, country=United States, datacenter=DataPacket, environment=production, instance=84.239.16.22:9998, ip=84.239.16.22, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=indiana404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.147504201Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-ingester-1" t=2024-05-29T13:44:15.147461649Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146692955Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146669867Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146643238Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146638705Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146621364Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146607502Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Indianapolis, country=United States, datacenter=DataPacket, environment=production, instance=84.239.16.1:9998, ip=84.239.16.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-indiana-pf.crt, role=vpn, server=indiana403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.14732346Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146566321Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=508403 slug=zyax t=2024-05-29T13:44:15.147276221Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=508403 slug=zyax instance= t=2024-05-29T13:44:15.147265233Z level=debug msg="Changing state" previous_state=Pending next_state=Normal previous_ends_at=2024-05-29T13:46:10Z next_ends_at=2024-05-29T13:44:10Z + logger=ngalert.state.manager user=508403 slug=zyax instance= t=2024-05-29T13:44:15.147252271Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=508403 slug=zyax t=2024-05-29T13:44:15.147232836Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146521646Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-gateway-848c7559f5-6tvc6" t=2024-05-29T13:44:15.147238815Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-gateway-848c7559f5-6tvc6" t=2024-05-29T13:44:15.147210083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146506244Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Indianapolis, country=United States, datacenter=DataPacket, environment=production, instance=84.239.16.1:9998, ip=84.239.16.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=indiana403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.147124209Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-distributor-868cf44b85-gctdm" t=2024-05-29T13:44:15.147139603Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146451197Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146446855Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146425817Z 
level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146402435Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146396025Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146388694Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146382525Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146373371Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146362887Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146356267Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146344961Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=652809 slug=glassnode instance="pod=grafana-mimir-compactor-0" t=2024-05-29T13:44:15.146977407Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146338485Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=37.19.221.28:9998, ip=37.19.221.28, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-houston.crt, role=vpn, server=houston433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.146958531Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.146892889Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=37.19.221.28:9998, ip=37.19.221.28, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-houston.crt, role=vpn, server=houston433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.146942938Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr 
instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146322309Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146311949Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146307373Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146301801Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.146914471Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146275699Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146213907Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146181834Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146168478Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146163981Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.146657358Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146107382Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.146380716Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146101798Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146078666Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.146559696Z caller=remote_instance_store.go:51 user=829352 slug=unfnbonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr 
instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146054555Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146045483Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146039894Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146024215Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.146001372Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.146502988Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145966463Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145916214Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145911383Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145904076Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145877466Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145857299Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14584256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145824577Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.14614345Z caller=remote_instance_store.go:51 user=405431 slug=deepersignals msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.146130044Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145782651Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" 
t=2024-05-29T13:44:15.14577712Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.146069989Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14576164Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14575687Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145741665Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145697306Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145999073Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145667818Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145624486Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.145825161Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145575592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145567998Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145910337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=191.96.67.91:9998, ip=191.96.67.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=houston422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.14593519Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145522354Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.145892371Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 
slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145493863Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.145877085Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145862703Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145838104Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145472874Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=231061 slug=teamaround version=74 fingerprint=fb6afb0273a3819f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.145761722Z level=debug msg="Alert rule evaluated" results="[{Instance:CacheClusterId=redis-broker-prod-euwe3-001 State:Normal Error: Results:map[] Values:map[H:{Var:H Labels:CacheClusterId=redis-broker-prod-euwe3-001 Value:0xc07e826798} I:{Var:I Labels:CacheClusterId=redis-broker-prod-euwe3-001 Value:0xc07e8267d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.145320831s EvaluationString:[ var='H' labels={CacheClusterId=redis-broker-prod-euwe3-001} value=0 ], [ var='I' labels={CacheClusterId=redis-broker-prod-euwe3-001} value=0 ]}]" duration=134.213377ms + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.14581009Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145770061Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145377752Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145367119Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145701877Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145261792Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145244605Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.14564771Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" 
t=2024-05-29T13:44:15.145224279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=941160 slug=uateu t=2024-05-29T13:44:15.145657745Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.14556272Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145170362Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.145596349Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145150416Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145541873Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145070672Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.145046492Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.145490465Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145488773Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145477323Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144982107Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145441069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144943193Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144925809Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145407275Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145400098Z level=debug msg="Execution no data state is Normal" 
handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145365669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144892212Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145352683Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144870021Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145322031Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144862082Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144853341Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145307008Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145273687Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144842957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=191.96.67.1:9998, ip=191.96.67.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-houston.crt, role=vpn, server=houston427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.145372053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145249845Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144805189Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.14523259Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=260796 slug=expressvpn t=2024-05-29T13:44:15.145326656Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145216546Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145186362Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145156339Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144695006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.14511912Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.14510952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=421567 slug=nexx360 version=89 fingerprint=e91ec7b19dfcce7d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.145091007Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.144706785s EvaluationString:}]" duration=167.063867ms + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.145052292Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144650951Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14460597Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144598275Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144916887Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144905834Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144533635Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144515349Z 
level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144504341Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=191.96.67.190:9998, ip=191.96.67.190, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-houston.crt, role=vpn, server=houston425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.144962777Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144492283Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.144920147Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144460069Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144430527Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14442009Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144725799Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144718141Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144392164Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.144809489Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.14467494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144667194Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144656836Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=191.96.67.190:9998, ip=191.96.67.190, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=houston425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.144777351Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144303581Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=191.96.67.190:9998, ip=191.96.67.190, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=houston425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.144764911Z level=debug msg="Setting next state" handler=resultNormal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144285365Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144637344Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14425437Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144623456Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.144727148Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144612636Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.144700076Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144213335Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144185791Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=396787 slug=carri instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144654765Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.144583554Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=396787 slug=carri instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144643993Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144164462Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144540023Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144138544Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.144597054Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=516847 slug=signit t=2024-05-29T13:44:15.144558498Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.284781ms + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144480583Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.144047483Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.144568115Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Houston, country=United States, datacenter=DataPacket, environment=production, instance=191.96.67.160:9998, ip=191.96.67.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-houston.crt, role=vpn, server=houston424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.144549262Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.144417725Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144437095Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143966021Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.144422962Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144369916Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143916581Z level=debug msg="Execution 
no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144358933Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.144293431Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143893663Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144307593Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143886972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144289653Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144222818Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144184593Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.1438268Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143807945Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143802918Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.144125825Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14378473Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.14375522Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143733367Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:15.144122953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143711356Z level=debug msg="Setting next state" 
handler=resultNoData + level=debug ts=2024-05-29T13:44:15.144150283Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.144040297Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:15.144084923Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143685842Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=191103 slug=amazonadmin version=105 fingerprint=f542e658d307cd13 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.144026385Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.143816867s EvaluationString:}]" duration=178.832627ms + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143674468Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143666846Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.143960973Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143646316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143635449Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.historian backend=loki user=190917 slug=d1cx t=2024-05-29T13:44:15.144023898Z level=debug msg="Done saving alert state history batch" +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143619376Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.144034742Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.144009379Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.143912892Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143579911Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.143870036Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143450877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.143802186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143401585Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.143804278Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.143756524Z caller=remote_instance_store.go:51 user=228733 slug=csmoney msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.143782253Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143370313Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143356242Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=iam-prod-12022050309120716060000001f" t=2024-05-29T13:44:15.143750291Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=iam-prod-12022050309120716060000001f" t=2024-05-29T13:44:15.143738443Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.143339877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:15.143692919Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=228733 slug=csmoney t=2024-05-29T13:44:15.143696142Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.14373511Z level=debug msg="Keeping state" state=Normal + 
+ [placeholder: several hundred raw Grafana ngalert debug/warn log lines captured around t=2024-05-29T13:44:15, retained verbatim as sample log data; loggers ngalert.state.manager, ngalert.state.manager.persist, ngalert.scheduler, and remote_instance_store.go:51 emit messages such as "Setting next state", "Keeping state", "Execution no data state is Normal", "State manager processing evaluation results", "Saving alert states", "calling SaveAlertInstance", "Alert rule evaluated", "Skip rule evaluation because it is paused", and reserved-label warnings, across tenants including deepersignals, sgr, csmoney, expressvpn, lab71, cmhdmxnp, railsbank, bizagi, kryon, hydrolix, perdoctus, and bitvavotrading]
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139378438Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139576136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139358514Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139556244Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139546518Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139495256Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.139435222Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.13944608Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139295653Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139422868Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139372886Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=678515 slug=paradisemobile t=2024-05-29T13:44:15.139392979Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139250182Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=678515 slug=paradisemobile instance="LoadBalancer=app/awseb--AWSEB-eKtqYcpF629u/03fcf630652a40fd" t=2024-05-29T13:44:15.139377758Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139244567Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139235602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139340825Z level=debug msg="Execution no 
data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=PIA, environment=production, instance=45.88.97.2:9998, ip=45.88.97.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/de-frankfurt.crt, role=vpn, server=frankfurt414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.139385247Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139329983Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=832320 slug=gabrielgomez756 t=2024-05-29T13:44:15.139368955Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139311876Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=832320 slug=gabrielgomez756 instance="instance=localhost:9182" t=2024-05-29T13:44:15.139339013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139226911Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=832320 slug=gabrielgomez756 t=2024-05-29T13:44:15.139273132Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=832320 slug=gabrielgomez756 version=11 fingerprint=b3afdfe57632569b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.139169952Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=localhost:9182 State:Normal Error: Results:map[] Values:map[Alert:{Var:Alert Labels:instance=localhost:9182 Value:0xc050337fe8} CPU usage:{Var:CPU usage Labels:instance=localhost:9182 Value:0xc014e82028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.13855774s EvaluationString:[ var='Alert' labels={instance=localhost:9182} value=0 ], [ var='CPU usage' labels={instance=localhost:9182} value=0.5699187398563907 ]}]" duration=50.053756ms + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139209879Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139170958Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=488634 slug=smartcall t=2024-05-29T13:44:15.139163682Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.072398ms + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139181138Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139136134Z level=debug msg="Execution no data state is 
Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139119909Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139112671Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139081229Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=PIA, environment=production, instance=216.24.216.7:9998, ip=216.24.216.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=frankfurt416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.139114473Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139074894Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139102731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139056731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139095003Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.139049309Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139082618Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139072797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=523906 slug=cyberark t=2024-05-29T13:44:15.139083319Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.13908454Z caller=remote_instance_store.go:51 user=467357 slug=peturs05 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.13901692Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138988576Z 
level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138983081Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.139000988Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=467357 slug=peturs05 t=2024-05-29T13:44:15.139027531Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138957499Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.138950982Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138933593Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.13892497Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138910732Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138887306Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138903095Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.138965948Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138846525Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138890627Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138838662Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138869525Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138799829Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138825994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138808677Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=83990 slug=greynoise t=2024-05-29T13:44:15.138831444Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.193187ms + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138742702Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138764101Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.13870385Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138753288Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138663387Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138652202Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=PIA, environment=production, instance=216.24.216.7:9998, ip=216.24.216.7, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/de-frankfurt.crt, role=vpn, server=frankfurt416, server_type=10G, 
service_name=cpz_vpn" t=2024-05-29T13:44:15.138699351Z level=debug msg="Keeping state" state=Normal +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138692321Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138623512Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.138631616Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138611769Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138601512Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138611755Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138599703Z level=debug msg="Setting next state" handler=resultNoData +} logger=ngalert.state.manager user=500743 slug=sgr instance="datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A" t=2024-05-29T13:44:15.138564609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=PIA, environment=production, instance=216.24.216.6:9998, ip=216.24.216.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=frankfurt419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.138479829Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138457946Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138449906Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138408973Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138384492Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +} logger=ngalert.scheduler user=500743 slug=sgr version=6 fingerprint=749e056f9bf2db26 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.138336555Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=e15268b7-b20a-4b78-8f8c-ae7cd220796a, ref_id=A State:NoData Error: Results:map[] Values:map[] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.137999891s EvaluationString:}]" duration=66.349901ms + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138356546Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138309847Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.138380011Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.138353768Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=c4fec05c-1179-4700-a452-19af7c26bdf3, ref_id=A" t=2024-05-29T13:44:15.138348681Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138194843Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138186491Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138139977Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.138072477Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138083905Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.138063086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.138129243Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137967314Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137927807Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=cf2003f07cd09899 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.137935562Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc03334aec0} Threshold:{Var:Threshold Labels: Value:0xc03334aec8} compare:{Var:compare Labels: Value:0xc03334af00} sum:{Var:sum Labels: Value:0xc03334af10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.137438408s EvaluationString:[ var='Breaches' labels={} value=15 ], [ var='Threshold' labels={} value=100 ], [ var='compare' labels={} value=0 ], [ var='sum' labels={} value=0 ]}]" duration=106.903624ms + level=debug ts=2024-05-29T13:44:15.137974675Z caller=remote_instance_store.go:51 user=21051 slug=mojio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=PIA, environment=production, instance=216.24.213.2:9998, ip=216.24.213.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=frankfurt413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.137954589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=21051 slug=mojio t=2024-05-29T13:44:15.137938185Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.137922739Z caller=remote_instance_store.go:51 user=442863 slug=numan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=442863 slug=numan instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137866878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442863 slug=numan instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137862133Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137865317Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137851092Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137838336Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442863 slug=numan instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137824953Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137828522Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=442863 slug=numan t=2024-05-29T13:44:15.137810393Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137798607Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137704172Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=PIA, environment=production, instance=216.24.213.2:9998, ip=216.24.213.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/de-frankfurt.crt, role=vpn, server=frankfurt413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.137718003Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137692855Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137671922Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137619988Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137608431Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.137620452Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.13759513Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137530987Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=DataPacket, environment=production, instance=212.102.57.65:9998, ip=212.102.57.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=frankfurt406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.137564556Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137450349Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.137423528Z 
caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.137446385Z caller=remote_instance_store.go:51 user=375798 slug=beeworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137412025Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=DataPacket, environment=production, instance=212.102.57.65:9998, ip=212.102.57.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/de-frankfurt.crt, role=vpn, server=frankfurt406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.137394602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=DataPacket, environment=production, instance=212.102.57.65:9998, ip=212.102.57.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/de-frankfurt.crt, role=vpn, server=frankfurt406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.137378085Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.137350319Z caller=remote_instance_store.go:51 user=829340 slug=unfnboprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137292181Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=405431 slug=deepersignals instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.137276989Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.13729325Z caller=remote_instance_store.go:51 user=194539 slug=sharris msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=DataPacket, environment=production, instance=212.102.57.1:9998, ip=212.102.57.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=frankfurt405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.137204473Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=194539 slug=sharris t=2024-05-29T13:44:15.137180151Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=405431 slug=deepersignals t=2024-05-29T13:44:15.137063396Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=405431 slug=deepersignals version=80 fingerprint=4f7e4eb5072c15c1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.136906468Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.136609974s EvaluationString:}]" duration=53.30235ms + level=debug ts=2024-05-29T13:44:15.136885247Z 
caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.136911489Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=DataPacket, environment=production, instance=212.102.57.138:9998, ip=212.102.57.138, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=frankfurt410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.136755767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-billing-db, env=pp" t=2024-05-29T13:44:15.136773783Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=76255 slug=benzinga t=2024-05-29T13:44:15.136754009Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=76255 slug=benzinga instance= t=2024-05-29T13:44:15.136743711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=76255 slug=benzinga version=1 fingerprint=d7a75685d5423c0e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.136621769Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.136371477s EvaluationString:}]" duration=132.891496ms + logger=ngalert.state.manager.persist user=877555 slug=cmbe t=2024-05-29T13:44:15.136487611Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.136353611Z caller=remote_instance_store.go:51 user=767797 slug=mgmresorts msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.136456333Z caller=remote_instance_store.go:51 user=235895 slug=nathanprenzler msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=235895 slug=nathanprenzler instance= t=2024-05-29T13:44:15.136405945Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=8625 slug=dovetail t=2024-05-29T13:44:15.136358416Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.071017ms + logger=ngalert.state.manager user=235895 slug=nathanprenzler t=2024-05-29T13:44:15.136356082Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.136255279Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=235895 slug=nathanprenzler version=1 fingerprint=4fc074655ab6dee4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.136314524Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=25.740958ms + level=error ts=2024-05-29T13:44:15.136281969Z caller=remote_rule_evaluator.go:110 user=235895 slug=nathanprenzler msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=DataPacket, environment=production, instance=195.181.170.225:9998, ip=195.181.170.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=frankfurt402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.136303678Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.136270022Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:15.136184398Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:15.136215159Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:15.136138116Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=767797 slug=mgmresorts t=2024-05-29T13:44:15.136101258Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.135987101Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.136032703Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=397650 slug=gofirefly instance="datasource_uid=5lEKSWRVz, ref_id=A" t=2024-05-29T13:44:15.135740708Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=397650 slug=gofirefly t=2024-05-29T13:44:15.135711626Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=397650 slug=gofirefly version=2 fingerprint=e651c38df3355b00 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.135618281Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=5lEKSWRVz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.135351873s EvaluationString:}]" duration=55.149922ms + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:15.135584387Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.327175ms + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.135686971Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.135651352Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.135419001Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, datacenter=DataPacket, environment=production, instance=138.199.18.129:9998, ip=138.199.18.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=frankfurt407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.135300637Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.135260184Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.135168349Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.134732083Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=314015 slug=fberggren t=2024-05-29T13:44:15.13480451Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:15.134777016Z caller=remote_instance_store.go:51 user=302415 slug=mgbcoreinfraprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.134787789Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=255034 slug=onetone t=2024-05-29T13:44:15.134764684Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=255034 slug=onetone instance="datasource_uid=grafanacloud-prom, ref_id=95TH PERC." t=2024-05-29T13:44:15.13475102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=255034 slug=onetone instance="datasource_uid=grafanacloud-prom, ref_id=95TH PERC." 
t=2024-05-29T13:44:15.134734744Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=302415 slug=mgbcoreinfraprod t=2024-05-29T13:44:15.134718035Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=302415 slug=mgbcoreinfraprod instance= t=2024-05-29T13:44:15.134689959Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=302415 slug=mgbcoreinfraprod t=2024-05-29T13:44:15.134650885Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=302415 slug=mgbcoreinfraprod version=17 fingerprint=8c2ae8999078f816 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.134550361Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0071f1968} B:{Var:B Labels: Value:0xc0071f1a20} C:{Var:C Labels: Value:0xc0071f1a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.134239314s EvaluationString:[ var='A' labels={} value=0 ], [ var='B' labels={} value=0 ], [ var='C' labels={} value=0 ]}]" duration=15.672114ms + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:15.134625259Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.817336ms + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=use1, instance=use1-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=redis, persistentvolumeclaim=redis-data-redis-node-2" t=2024-05-29T13:44:15.134608973Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.133411906Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=303449 slug=rcmalli t=2024-05-29T13:44:15.134489315Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Fargo, country=United States, datacenter=DataPacket, environment=production, instance=84.239.48.1:9998, ip=84.239.48.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=northdakota402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.134522321Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.134506909Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.13422268Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=193.56.252.34:9998, ip=193.56.252.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.13400753Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=use1, instance=use1-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-2" t=2024-05-29T13:44:15.134521848Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.134411634Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=193.56.252.242:9998, ip=193.56.252.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=dublin408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.133365325Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.133327036Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=193.56.252.242:9998, ip=193.56.252.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.133158659Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=381888 slug=devprintcart instance= t=2024-05-29T13:44:15.134438534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=193.56.252.226:9998, ip=193.56.252.226, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.132735569Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=193.56.252.210:9998, ip=193.56.252.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=dublin406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.132532516Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=193.56.252.210:9998, ip=193.56.252.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=dublin406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.132521781Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.132493536Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=use1, instance=use1-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-kafka-2" t=2024-05-29T13:44:15.134364284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=193.56.252.210:9998, ip=193.56.252.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.132345491Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=277807 slug=info96f8 t=2024-05-29T13:44:15.13426549Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=188.241.178.34:9998, ip=188.241.178.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.131957606Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=188.241.178.34:9998, ip=188.241.178.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.131944873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.131914452Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=188.241.178.2:9998, ip=188.241.178.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=dublin410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.13180715Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.132116195Z caller=remote_instance_store.go:51 user=387869 slug=lantor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=146.70.130.18:9998, ip=146.70.130.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=dublin404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.13115576Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=M247, environment=production, instance=146.70.130.18:9998, ip=146.70.130.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.13096977Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=DataPacket, environment=production, instance=149.34.242.129:9998, ip=149.34.242.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=dublin421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.130764728Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.134090876Z caller=remote_instance_store.go:51 user=310637 slug=notino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-mgmt-metabase-cluster-instance-1, env=global" t=2024-05-29T13:44:15.133953471Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=use1, instance=use1-001-pk8w02, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-0" t=2024-05-29T13:44:15.133920106Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-mgmt-metabase-cluster-instance-1, env=global" t=2024-05-29T13:44:15.133778054Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:15.13377282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:15.133743144Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=173730 slug=nikon version=84 fingerprint=156cb81e3960574b 
attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.133699386Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.133435488s EvaluationString:}]" duration=232.239726ms + level=debug ts=2024-05-29T13:44:15.133685826Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.13369747Z caller=remote_instance_store.go:51 user=494138 slug=takepayments msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=855636 slug=ginkgobioworks instance="app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda" t=2024-05-29T13:44:15.13357376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=855636 slug=ginkgobioworks instance="app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda" t=2024-05-29T13:44:15.13352853Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.133522209Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:15.133396562Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=855636 slug=ginkgobioworks version=6 fingerprint=197cb6eeae5000c3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.133177801Z level=debug msg="Alert rule evaluated" results="[{Instance:app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, 
k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda Value:0xc006696728} B:{Var:B Labels:app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda Value:0xc006696af0} C:{Var:C Labels:app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda Value:0xc006696ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.132469384s EvaluationString:[ var='A' labels={app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda} value=0 ], [ var='B' labels={app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda} value=0 ], [ var='C' labels={app=cluster-autoscaler, cluster=eks-shared-nonprod, instance=ip-100-65-8-105.us-east-2.compute.internal, instance_name=cluster-autoscaler-674d6f75f8-6b88l, job=cluster-autoscaler, k8s_cluster=eks-shared-nonprod, k8s_cluster_name=eks-shared-nonprod, k8s_container_name=cluster-autoscaler, k8s_controller_kind=ReplicaSet, k8s_controller_name=cluster-autoscaler-674d6f75f8, k8s_namespace=kube-system, 
k8s_namespace_name=kube-system, k8s_pod_name=cluster-autoscaler-674d6f75f8-6b88l, k8s_pod_node_name=ip-100-65-8-105.us-east-2.compute.internal, namespace=kube-system, otel_service_name=cluster-autoscaler, owner=sda} value=0 ]}]" duration=22.875651ms + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_router_006 A" t=2024-05-29T13:44:15.133372673Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.133331311Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_router_004 A" t=2024-05-29T13:44:15.133326697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.133284906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.133274725Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.133298234Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_router_003 A" t=2024-05-29T13:44:15.133287465Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_router_002 A" t=2024-05-29T13:44:15.133268589Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=215563 slug=pbnoc instance="datasource_uid=rwj4m-eGz, ref_id=A" t=2024-05-29T13:44:15.133296009Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.131843562Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=215563 slug=pbnoc version=1 fingerprint=0cae604b4849c85e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.133239932Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=rwj4m-eGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.133063967s EvaluationString:}]" duration=14.533147ms + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_router_001 A" t=2024-05-29T13:44:15.13323824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=use1, instance=use1-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-master-1" t=2024-05-29T13:44:15.133222988Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_nginx_005 A" t=2024-05-29T13:44:15.133210307Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=use1, instance=use1-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-master-1" t=2024-05-29T13:44:15.133209393Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq 
instance="name=logstash_nginx_001 A" t=2024-05-29T13:44:15.133184196Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:15.131883985Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_kong_005 A" t=2024-05-29T13:44:15.133109828Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.13302208Z caller=remote_instance_store.go:51 user=829352 slug=unfnbonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_kong_004 A" t=2024-05-29T13:44:15.133079145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_kong_003 A" t=2024-05-29T13:44:15.133061571Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=redis, persistentvolumeclaim=redis-data-redis-node-0" t=2024-05-29T13:44:15.133034763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_kong_001 A" t=2024-05-29T13:44:15.133010991Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_kong_001 A" t=2024-05-29T13:44:15.133007083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-robocall-originator-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-robocall-originator-queue, region=eu-west-2" t=2024-05-29T13:44:15.132914769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_general_005 A" t=2024-05-29T13:44:15.13296425Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-robocall-originator-dlq, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-robocall-originator-dlq, region=eu-west-2" t=2024-05-29T13:44:15.132892988Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_general_003 A" t=2024-05-29T13:44:15.132884104Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-0" t=2024-05-29T13:44:15.132923126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-robocall-originator-dlq, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-robocall-originator-dlq, region=eu-west-2" t=2024-05-29T13:44:15.132880458Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-referee-vendor-ranking-queue, 
name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-referee-vendor-ranking-queue, region=eu-west-2" t=2024-05-29T13:44:15.132843877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-kafka-0" t=2024-05-29T13:44:15.132834454Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-referee-vendor-ranking-dlq, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-referee-vendor-ranking-dlq, region=eu-west-2" t=2024-05-29T13:44:15.132825056Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_general_001 A" t=2024-05-29T13:44:15.132818174Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_default_001 A" t=2024-05-29T13:44:15.132776147Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-referee-vendor-ranking-dlq, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-referee-vendor-ranking-dlq, region=eu-west-2" t=2024-05-29T13:44:15.132813086Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-order-replacement-notifier-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-order-replacement-notifier-queue, region=eu-west-2" t=2024-05-29T13:44:15.132790306Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-order-replacement-notifier-dlq, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-order-replacement-notifier-dlq, region=eu-west-2" t=2024-05-29T13:44:15.132750995Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=logstash_callback_002 A" t=2024-05-29T13:44:15.132697269Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.132639271Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-order-invoices-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-order-invoices-queue, region=eu-west-2" t=2024-05-29T13:44:15.13254279Z level=debug msg="Changing state" previous_state=Pending next_state=Normal previous_ends_at=2024-05-29T13:43:10Z next_ends_at=2024-05-29T13:44:10Z + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:15.132461322Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w02, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-2" t=2024-05-29T13:44:15.132468412Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=430961 
slug=solifi t=2024-05-29T13:44:15.132392863Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.944194ms + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-item-heartbeat-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-item-heartbeat-queue, region=eu-west-2" t=2024-05-29T13:44:15.132360455Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-item-heartbeat-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-item-heartbeat-queue, region=eu-west-2" t=2024-05-29T13:44:15.132353105Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-item-heartbeat-oos-prediction-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-item-heartbeat-oos-prediction-queue, region=eu-west-2" t=2024-05-29T13:44:15.132321804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=redis, persistentvolumeclaim=redis-data-redis-node-2" t=2024-05-29T13:44:15.132223335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-veridas-db, env=dev" t=2024-05-29T13:44:15.132110245Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-grocery-fulfillment-report-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-grocery-fulfillment-report-queue, region=eu-west-2" t=2024-05-29T13:44:15.13216285Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-1" t=2024-05-29T13:44:15.132160695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-grocery-fulfillment-report-queue, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-grocery-fulfillment-report-queue, region=eu-west-2" t=2024-05-29T13:44:15.1321347Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-1" t=2024-05-29T13:44:15.132152648Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.132060178Z caller=remote_image_capturer.go:33 user=845543 slug=deliveryhero rule_org_id=1 rule_uid=fdl39fpbjzncwe msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks2, instance=uks2-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-kafka-2" t=2024-05-29T13:44:15.132086037Z level=debug msg="Setting next state" handler=resultNormal + 
level=debug ts=2024-05-29T13:44:15.131959935Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="dimension_QueueName=talabat-prod-grocery-fulfillment-grocery-fulfillment-caller-dlq, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-grocery-fulfillment-grocery-fulfillment-caller-dlq, region=eu-west-2" t=2024-05-29T13:44:15.131847193Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=387869 slug=lantor instance= t=2024-05-29T13:44:15.131927072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:15.131539726Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=97.045377ms + level=debug ts=2024-05-29T13:44:15.131774519Z caller=remote_instance_store.go:51 user=456850 slug=juniz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:15.13178348Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:15.131774592Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=401442 slug=missioncontrolss t=2024-05-29T13:44:15.131706563Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=dc4e5efda539a4d2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.131652476Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.131251883s EvaluationString:}]" duration=16.653802ms + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks1, instance=uks1-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-kafka-1" t=2024-05-29T13:44:15.131731737Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.131639192Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks1, instance=uks1-001-pk8w03, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-kafka-1" t=2024-05-29T13:44:15.13172013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=401442 slug=missioncontrolss version=11 fingerprint=21343a2f014c8d84 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.131641768Z level=debug msg="Alert rule evaluated" results="[{Instance:dyno=web.1, env=prod, herokuApp=heroku, job=drainlogs, serviceName=sf-auth, service_name=drainlogs, token=52d5811f-2ac0-452b-b0e1-5089b132b5be State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:dyno=web.1, env=prod, herokuApp=heroku, job=drainlogs, serviceName=sf-auth, service_name=drainlogs, token=52d5811f-2ac0-452b-b0e1-5089b132b5be Value:0xc022475068} B:{Var:B Labels:dyno=web.1, env=prod, herokuApp=heroku, job=drainlogs, serviceName=sf-auth, service_name=drainlogs, 
token=52d5811f-2ac0-452b-b0e1-5089b132b5be Value:0xc0224750e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.13137011s EvaluationString:[ var='A' labels={dyno=web.1, env=prod, herokuApp=heroku, job=drainlogs, serviceName=sf-auth, service_name=drainlogs, token=52d5811f-2ac0-452b-b0e1-5089b132b5be} value=0.06422222222222224 ], [ var='B' labels={dyno=web.1, env=prod, herokuApp=heroku, job=drainlogs, serviceName=sf-auth, service_name=drainlogs, token=52d5811f-2ac0-452b-b0e1-5089b132b5be} value=0 ]}]" duration=50.041892ms + level=debug ts=2024-05-29T13:44:15.131595318Z caller=remote_instance_store.go:51 user=290313 slug=replit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks1, instance=uks1-001-pk8w02, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-1" t=2024-05-29T13:44:15.131433363Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=275406 slug=aceable t=2024-05-29T13:44:15.131316328Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.519429ms + logger=ngalert.scheduler user=253365 slug=mgroup t=2024-05-29T13:44:15.131085095Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks1, instance=uks1-001-pk8w02, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-master-2" t=2024-05-29T13:44:15.131253933Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.131088886Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:15.131111642Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.131079207Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:15.131081409Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.13102478Z caller=remote_instance_store.go:51 user=461948 slug=compuco msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=461948 slug=compuco instance="datasource_uid=grafanacloud-logs, ref_id=401_Count" t=2024-05-29T13:44:15.130951275Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=461948 slug=compuco instance="datasource_uid=grafanacloud-logs, ref_id=401_Count" t=2024-05-29T13:44:15.130932368Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=461948 slug=compuco instance="datasource_uid=grafanacloud-logs, ref_id=401_Count" t=2024-05-29T13:44:15.130921177Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=461948 slug=compuco instance="datasource_uid=grafanacloud-logs, ref_id=401_Count" t=2024-05-29T13:44:15.130907861Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.130725839Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.130649551Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=442934 
slug=arqit instance="cluster=uks1, instance=uks1-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-master-0" t=2024-05-29T13:44:15.130733217Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=394895 slug=houroneai instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.130677808Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=394895 slug=houroneai instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.130671675Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.130581852Z caller=remote_instance_store.go:51 user=164951 slug=upet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=uks1, instance=uks1-001-pk8w01, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-data-2" t=2024-05-29T13:44:15.130648523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=437245 slug=kosayuspun t=2024-05-29T13:44:15.130557841Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=437245 slug=kosayuspun instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.130547077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=437245 slug=kosayuspun instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.130535822Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=437245 slug=kosayuspun t=2024-05-29T13:44:15.130521643Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dublin, country=Ireland, datacenter=DataPacket, environment=production, instance=149.34.242.129:9998, ip=149.34.242.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ireland.crt, role=vpn, server=dublin421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.13056374Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:15.13050852Z caller=remote_alert_sender.go:94 user=560104 slug=northwestnodes host=northwestnodes-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.49.104.204:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f4455db1-667b-4455-91ca-851821006a33 alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=91.90.124.2:9998, ip=91.90.124.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=douglas403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.130354607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=91.90.124.2:9998, ip=91.90.124.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, 
path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=douglas403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.130324871Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=164951 slug=upet instance= t=2024-05-29T13:44:15.130506735Z level=debug msg="Keeping state" state=Normal + level=error ts=2024-05-29T13:44:15.130226934Z caller=remote_rule_evaluator.go:110 user=330911 slug=actian msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=91.90.124.2:9998, ip=91.90.124.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/man.crt, role=vpn, server=douglas403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.130083294Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=91.90.124.2:9998, ip=91.90.124.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/man.crt, role=vpn, server=douglas403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.130070735Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=330911 slug=actian t=2024-05-29T13:44:15.130373914Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=330911 slug=actian instance= t=2024-05-29T13:44:15.130320314Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.scheduler user=164951 slug=upet version=2 fingerprint=b6c1b6390b13284f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.130344028Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.129934996s EvaluationString:}]" duration=57.558537ms + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=preprod, instance=uks0-dev-pk8w03, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-zookeeper-0" t=2024-05-29T13:44:15.130440271Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.130359215Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=330911 slug=actian version=27 fingerprint=093522d422ba53bd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.13026104Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.070436ms + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=preprod, instance=uks0-dev-pk8w03, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-master-0" t=2024-05-29T13:44:15.130257736Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.130192397Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.13013895Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=preprod, instance=uks0-dev-pk8w03, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-data-2" t=2024-05-29T13:44:15.130155828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.130099886Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=preprod, instance=uks0-dev-pk8w02, job=integrations/kubernetes/kubelet, namespace=redis, persistentvolumeclaim=redis-data-redis-node-2" t=2024-05-29T13:44:15.130049444Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.129686735Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.129962641Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=91.90.124.20:9998, ip=91.90.124.20, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=douglas404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.129884018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:15.129745492Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.677048ms + logger=ngalert.state.manager user=148654 slug=tinybeans instance= t=2024-05-29T13:44:15.129767988Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.129731515Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.523319ms + level=debug ts=2024-05-29T13:44:15.12971297Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=preprod, instance=uks0-dev-pk8w02, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-master-2" t=2024-05-29T13:44:15.129718659Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=preprod, instance=uks0-dev-pk8w02, job=integrations/kubernetes/kubelet, namespace=elasticsearch, persistentvolumeclaim=data-elasticsearch-master-2" t=2024-05-29T13:44:15.129704938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=91.90.124.20:9998, ip=91.90.124.20, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/man.crt, role=vpn, server=douglas404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.129674711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=678643 slug=communityhub t=2024-05-29T13:44:15.129526766Z level=debug msg="Saving alert states" count=1 
max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=399183 slug=guidion t=2024-05-29T13:44:15.129454259Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.690009ms + logger=ngalert.state.manager user=678643 slug=communityhub t=2024-05-29T13:44:15.129465777Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Doha, country=Qatar, datacenter=M247, environment=production, instance=95.181.234.2:9998, ip=95.181.234.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=qatar403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.12947784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:15.129456309Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + level=debug ts=2024-05-29T13:44:15.129378789Z caller=remote_instance_store.go:51 user=111653 slug=theassociationmxp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=349246 slug=metricgamingdev t=2024-05-29T13:44:15.129296907Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.975599ms + logger=ngalert.state.manager user=442934 slug=arqit instance="cluster=preprod, instance=uks0-dev-pk8w01, job=integrations/kubernetes/kubelet, namespace=kafka-core, persistentvolumeclaim=data-core-kafka-0" t=2024-05-29T13:44:15.129314303Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:15.129249759Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=111653 slug=theassociationmxp t=2024-05-29T13:44:15.12922796Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.12922313Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=info ts=2024-05-29T13:44:15.129146307Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.176.29:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c2537485-7694-4933-9f21-a0b8d0ef99f7 alerts=1 + level=debug ts=2024-05-29T13:44:15.129089592Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.129073208Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.129058217Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.128989061Z caller=remote_image_capturer.go:61 user=391359 slug=linklogistics rule_org_id=1 rule_uid=cdh3w2rz4d62od dashboard=ddh3r9avwaqdce panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Doha, country=Qatar, datacenter=M247, environment=production, instance=95.181.234.26:9998, ip=95.181.234.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=qatar402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.129025211Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=612695 slug=ocipprod t=2024-05-29T13:44:15.128873148Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=33.772283ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Doha, country=Qatar, datacenter=M247, environment=production, instance=95.181.234.26:9998, ip=95.181.234.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/qatar.crt, role=vpn, server=qatar402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.128821627Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.12877115Z caller=remote_instance_store.go:51 user=264941 slug=agnosticeng msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=891812 slug=eigencert t=2024-05-29T13:44:15.128566518Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.190818ms + logger=ngalert.state.manager.persist user=805026 slug=powwro11y t=2024-05-29T13:44:15.128604466Z level=debug msg="Saving alert states" count=36 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.128557442Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.128553093Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=264941 slug=agnosticeng instance="chain_name=polygon, chain_network_name=mainnet, datacenter=speedynodes, node=polygon-archive" t=2024-05-29T13:44:15.128478863Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=264941 slug=agnosticeng instance="chain_name=polygon, chain_network_name=mainnet, datacenter=eu-02, node=erigon-polygon-mainnet-archive-1" 
t=2024-05-29T13:44:15.128384872Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.128358918Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.128320499Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.128314288Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.128314026Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1706799597213293568, instance=https://tpigateway.eonenergy.com, job=Sales360UK - SMEPortals, probe=London" t=2024-05-29T13:44:15.128305382Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.128252547Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.128221432Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:15.128215126Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.128162241Z caller=remote_instance_store.go:51 user=776563 slug=eagleeye4els msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=spectrum-cable" t=2024-05-29T13:44:15.128188539Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1706799164558755840, instance=https://dynamicpricingapi.powwr.com, job=Sales360US - EPT, probe=NewYork" t=2024-05-29T13:44:15.128131753Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=264941 slug=agnosticeng instance="chain_name=ethereum, chain_network_name=mainnet, datacenter=eu-02, node=erigon-ethereum-mainnet-archive-1" t=2024-05-29T13:44:15.128140312Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.128045224Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.128050633Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:15.128034997Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=112387 slug=lucidhq instance= t=2024-05-29T13:44:15.128023057Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.127917783Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue" t=2024-05-29T13:44:15.127945461Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.127888178Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1706798559786388736, instance=https://salestoolkit.business.totalenergies.uk, job=Sales360UK - SMEPortals, probe=London" t=2024-05-29T13:44:15.127914414Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Dhaka, country=Bangladesh, datacenter=GSL, environment=production, instance=64.64.112.128:9998, ip=64.64.112.128, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/bangladesh.crt, role=vpn, server=bangladesh404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.127873811Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless" t=2024-05-29T13:44:15.12777945Z level=debug msg="Keeping state" state=Normal +level=info ts=2024-05-29T13:44:15.126204305Z caller=remote_alert_sender.go:94 user=537072 slug=devbitvavo host=devbitvavo-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.97.118:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c7d48550-c812-496d-a6fa-8a26b3f1a118 alerts=1 +level=debug ts=2024-05-29T13:44:15.127638188Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Detroit, country=United States, datacenter=DataPacket, environment=production, instance=84.239.17.150:9998, ip=84.239.17.150, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-michigan-pf.crt, role=vpn, server=michigan404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.127592984Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid" t=2024-05-29T13:44:15.12763598Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.127525597Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1706798121521765632, instance=https://dev-reporting.powwr.com, job=Shared - Tableau, probe=Dallas" t=2024-05-29T13:44:15.127601348Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines" t=2024-05-29T13:44:15.127479182Z level=debug msg="Setting next state" handler=resultNormal 
+logger=ngalert.state.manager user=391359 slug=linklogistics instance= t=2024-05-29T13:44:15.127446444Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703156605687169280, instance=https://udcoreapiusa.com/, job=Shared - UDCoreAPIUSA, probe=London" t=2024-05-29T13:44:15.127425369Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=99366 slug=artery t=2024-05-29T13:44:15.127399187Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=99366 slug=artery instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.127379874Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703156605687169280, instance=https://udcoreapiusa.com/, job=Shared - UDCoreAPIUSA, probe=London" t=2024-05-29T13:44:15.127412118Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:15.127348956Z level=debug msg="State manager processing evaluation results" resultCount=6 +logger=ngalert.state.manager.persist user=8625 slug=dovetail t=2024-05-29T13:44:15.127285101Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=8625 slug=dovetail instance="datasource_uid=grafanacloud-dovetailsoftware, ref_id=A" t=2024-05-29T13:44:15.127264031Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=8625 slug=dovetail t=2024-05-29T13:44:15.127221761Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=8625 slug=dovetail version=3 fingerprint=e5674649e00e52a3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.127172918Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-dovetailsoftware, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.126867661s EvaluationString:}]" duration=22.171441ms +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703156573722419712, instance=https://portal.powwr.com, job=Sales360US - EPT, probe=Dallas" t=2024-05-29T13:44:15.12722534Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.127103353Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.126967511Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.126915711Z caller=remote_instance_store.go:51 user=504517 slug=mohdkhairi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=504517 slug=mohdkhairi t=2024-05-29T13:44:15.126821072Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.126825577Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.126768562Z caller=remote_instance_store.go:51 user=729364 slug=gridsz msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.12661953Z caller=remote_instance_store.go:51 user=672418 slug=streamkap msg="calling 
SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.126748735Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:15.1267281Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=729364 slug=gridsz instance= t=2024-05-29T13:44:15.126683797Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703154578882993152, instance=https://signon.brokerutilityhub.com, job=Sales360UK - SMEPortals, probe=London" t=2024-05-29T13:44:15.12662544Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703154578882993152, instance=https://signon.brokerutilityhub.com, job=Sales360UK - SMEPortals, probe=London" t=2024-05-29T13:44:15.12661094Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.126564599Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.126431915Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.126496269Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=700399 slug=demo19344 instance="__name__=probe_icmp_duration_rtt_max_seconds, config_version=1690182213571201792, instance=www.xf1869.com, job=xf, probe=Tokyo" t=2024-05-29T13:44:15.126423441Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.126485681Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=292165 slug=singleorigin t=2024-05-29T13:44:15.126366744Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.911678ms +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703154534537392384, instance=https://sales.npowerbrokerportal.com, job=Sales360UK - SMEPortals, probe=London" t=2024-05-29T13:44:15.126238773Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=700399 slug=demo19344 instance="__name__=probe_icmp_duration_rtt_max_seconds, config_version=1690182213571201792, instance=www.xf1869.com, job=xf, probe=Oregon" t=2024-05-29T13:44:15.126369368Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.126294867Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=700399 slug=demo19344 instance="__name__=probe_icmp_duration_rtt_max_seconds, config_version=1690182213571201792, instance=www.xf1869.com, job=xf, probe=London" t=2024-05-29T13:44:15.126345898Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=700399 slug=demo19344 t=2024-05-29T13:44:15.126273243Z level=debug msg="State manager processing evaluation results" resultCount=5 +logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:15.126276235Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:15.126265342Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.126202965Z 
caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:15.126254951Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:15.126214264Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=info ts=2024-05-29T13:44:15.12594867Z caller=remote_alert_sender.go:94 user=607982 slug=soberdude host=soberdude-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.59.45:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=eb2ce2b3-cc56-4bd9-ae78-f1b7a6e2d93d alerts=1 +logger=ngalert.state.manager.persist user=537072 slug=devbitvavo t=2024-05-29T13:44:15.125748886Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.803279ms +level=debug ts=2024-05-29T13:44:15.125814983Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703154447830886400, instance=https://edfenergybrokers.co.uk, job=Sales360UK - SMEPortals, probe=London" t=2024-05-29T13:44:15.12576263Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Detroit, country=United States, datacenter=DataPacket, environment=production, instance=84.239.17.150:9998, ip=84.239.17.150, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=michigan404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.125712762Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.125575449Z caller=remote_instance_store.go:51 user=556147 slug=bettercloudholding msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=556147 slug=bettercloudholding instance= t=2024-05-29T13:44:15.12552748Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703154392850382080, instance=https://bglbrokerportal.com, job=Sales360UK - SMEPortals, probe=London" t=2024-05-29T13:44:15.125486616Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.125361672Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:15.125287712Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/docusign-events, Stage=--" t=2024-05-29T13:44:15.125269022Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703154339915573504, instance=https://www.powwr.com, job=Powwr Website, probe=Dallas" t=2024-05-29T13:44:15.125297176Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=316418 slug=workmotion version=3 fingerprint=46e96951f76fbd9e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.125044678Z level=debug msg="Alert rule evaluated" results="[{Instance:ApiId=1mr10216z5, Method=--, Resource=/docusign-events, Stage=-- State:NoData Error: Results:map[] Values:map[B:{Var:B 
Labels:ApiId=1mr10216z5, Method=--, Resource=/docusign-events, Stage=-- Value:} C:{Var:C Labels:ApiId=1mr10216z5, Method=--, Resource=/docusign-events, Stage=-- Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.124577431s EvaluationString:[ var='B' labels={ApiId=1mr10216z5, Method=--, Resource=/docusign-events, Stage=--} value=null ], [ var='C' labels={ApiId=1mr10216z5, Method=--, Resource=/docusign-events, Stage=--} value=null ]}]" duration=44.955071ms +level=debug ts=2024-05-29T13:44:15.125026098Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.125012145Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=751219 slug=melissaaveryweir instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.124886596Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager.persist user=326888 slug=buildingblocks t=2024-05-29T13:44:15.124919388Z level=debug msg="Saving alert states" count=8 max_state_save_concurrency=1 +logger=ngalert.state.manager user=751219 slug=melissaaveryweir instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.124849276Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=751219 slug=melissaaveryweir t=2024-05-29T13:44:15.124831406Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.124908949Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703154308304353024, instance=https://udcloudpcw.co.uk, job=Broker360 - PCW, probe=London" t=2024-05-29T13:44:15.124905277Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=805026 slug=powwro11y instance="config_version=1703090413607988736, instance=https://demodfs.powwr.com, job=Risk360 - DFS, probe=Dallas" t=2024-05-29T13:44:15.124830203Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=326888 slug=buildingblocks instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.124772663Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=326888 slug=buildingblocks instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.12475101Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=326888 slug=buildingblocks instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.124727559Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=326888 slug=buildingblocks instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.124716334Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=326888 slug=buildingblocks instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.124703832Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=326888 slug=buildingblocks instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.124673043Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=326888 slug=buildingblocks 
instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.12465557Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=326888 slug=buildingblocks instance="datasource_uid=LKDV0NQnz, ref_id=A" t=2024-05-29T13:44:15.124643934Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.124240337Z caller=remote_instance_store.go:51 user=177574 slug=flanderijnbv msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.124162831Z caller=remote_instance_store.go:51 user=799774 slug=sagacitysoft msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=371756 slug=asapp instance="namespace=prod-assurantlifestyle" t=2024-05-29T13:44:15.124196706Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=177574 slug=flanderijnbv t=2024-05-29T13:44:15.124145939Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.124108034Z caller=remote_instance_store.go:51 user=502468 slug=gmawater msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=177574 slug=flanderijnbv version=1 fingerprint=2fb067e8bd2cbd78 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.12408352Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.123840378s EvaluationString:}]" duration=40.869112ms +logger=ngalert.state.manager user=262248 slug=jeanphilippequemener instance= t=2024-05-29T13:44:15.124033187Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.124069617Z caller=remote_instance_store.go:51 user=829340 slug=unfnboprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Des Moines, country=United States, datacenter=DataPacket, environment=production, instance=84.239.17.1:9998, ip=84.239.17.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=iowa402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.124101179Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=262248 slug=jeanphilippequemener t=2024-05-29T13:44:15.12394651Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager.persist user=624834 slug=adongre t=2024-05-29T13:44:15.123896928Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=624834 slug=adongre instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.123882379Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=262248 slug=jeanphilippequemener t=2024-05-29T13:44:15.123192492Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.475745ms +logger=ngalert.state.manager.persist user=802643 slug=eigenda t=2024-05-29T13:44:15.123151522Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.123196484Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=802643 slug=eigenda instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.123131279Z 
level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=802643 slug=eigenda instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.123089777Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.9:9998, ip=181.41.206.9, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=denver424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.123093899Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.122808376Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.122829047Z caller=remote_instance_store.go:51 user=682586 slug=nielsonquea msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=682586 slug=nielsonquea t=2024-05-29T13:44:15.122790015Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.122760634Z caller=remote_image_capturer.go:33 user=682586 slug=nielsonquea rule_org_id=1 rule_uid=dea14b8b-62a9-4c82-974f-5a397794d472 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" +logger=ngalert.state.manager user=682586 slug=nielsonquea instance= t=2024-05-29T13:44:15.122749944Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=682586 slug=nielsonquea t=2024-05-29T13:44:15.122697443Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.122705824Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.122567145Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.122531851Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.6:9998, ip=181.41.206.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-denver.crt, role=vpn, server=denver421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.122326044Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.6:9998, ip=181.41.206.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=denver421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.122127227Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.122019932Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.122043968Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.122037548Z caller=remote_instance_store.go:51 user=473395 slug=binaryronin msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.121938666Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.5:9998, ip=181.41.206.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-denver.crt, role=vpn, server=denver420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.121942215Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.5:9998, ip=181.41.206.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-denver.crt, role=vpn, server=denver420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.121931547Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=248027 slug=mishp t=2024-05-29T13:44:15.121910448Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.087098ms +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.12190286Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.121796059Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.121737153Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.5:9998, ip=181.41.206.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=denver420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.121747236Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=823141 slug=yomafleetobservability t=2024-05-29T13:44:15.121655249Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.scheduler user=406811 slug=clabsmento t=2024-05-29T13:44:15.121627055Z level=debug msg="Skip rule evaluation because it is paused" +logger=ngalert.state.manager user=823141 slug=yomafleetobservability t=2024-05-29T13:44:15.121582866Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.121517901Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.4:9998, ip=181.41.206.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=denver419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.121397405Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.121343181Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.121333666Z caller=remote_instance_store.go:51 user=375798 slug=beeworks msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Zetetic 1 (OVH-SBG), chain=Kusama, exported_chain=ksmcc3, host=OVH, instance=37.59.29.95:39617, job=Cloud, location=Strasbourg, FR, pool=Zetetic 1, status=best" t=2024-05-29T13:44:15.121346983Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=375798 slug=beeworks instance="DBInstanceIdentifier=prod-recon-db" t=2024-05-29T13:44:15.12128934Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=538355 slug=flogic instance="__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510" t=2024-05-29T13:44:15.121283639Z level=warn msg="Failed to take an image" dashboard=wwVPQc1Vk panel=10 error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Zetetic (OVH-SBG), chain=Kusama, exported_chain=ksmcc3, host=OVH, instance=37.59.29.95:39616, job=Cloud, location=Strasbourg, FR, pool=Zetetic, status=best" t=2024-05-29T13:44:15.121309828Z level=debug 
msg="Keeping state" state=Normal +logger=ngalert.state.manager user=375798 slug=beeworks instance="DBInstanceIdentifier=prod-recon-db" t=2024-05-29T13:44:15.121277948Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Xalamus 1 (RPS-MDH), chain=Kusama, exported_chain=ksmcc3, host=Rapidswitch, instance=188.227.164.110:39618, job=Cloud, location=Maidenhead, GB, pool=Xalamus 1, status=best" t=2024-05-29T13:44:15.121267683Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.121214121Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=375798 slug=beeworks instance="DBInstanceIdentifier=oracle-test" t=2024-05-29T13:44:15.121226769Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=397201 slug=zultys t=2024-05-29T13:44:15.12120957Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.686888ms +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Xalamus (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39619, job=Cloud, location=Arezzo, IT, pool=Xalamus, status=best" t=2024-05-29T13:44:15.121222901Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:15.121194265Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=375798 slug=beeworks instance="DBInstanceIdentifier=oracle-prod" t=2024-05-29T13:44:15.12119898Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Xalamus (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39619, job=Cloud, location=Arezzo, IT, pool=Xalamus, status=best" t=2024-05-29T13:44:15.1212077Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=375798 slug=beeworks instance="DBInstanceIdentifier=bee-poc-staging-db" t=2024-05-29T13:44:15.121139106Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.121098729Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.3:9998, ip=181.41.206.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=denver418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.121010146Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Uno (OVH-WAW), chain=Kusama, exported_chain=ksmcc3, host=OVH, instance=146.59.68.162:39616, job=Cloud, location=Warsaw, PL, pool=Uno, status=best" t=2024-05-29T13:44:15.120988952Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Titan 1 (OVH-LON), chain=Kusama, exported_chain=ksmcc3, host=OVH, instance=57.128.141.149:39617, job=Cloud, location=London, GB, pool=Titan 1, status=best" t=2024-05-29T13:44:15.120966363Z level=debug msg="Keeping 
state" state=Normal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Titan 1 (OVH-LON), chain=Kusama, exported_chain=ksmcc3, host=OVH, instance=57.128.141.149:39617, job=Cloud, location=London, GB, pool=Titan 1, status=best" t=2024-05-29T13:44:15.120951103Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Pruned (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39616, job=Higham Home, location=Higham Ferrers, GB, pool=Pruned, status=best" t=2024-05-29T13:44:15.120831679Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.120729973Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.120860741Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.120729315Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.2:9998, ip=181.41.206.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-denver.crt, role=vpn, server=denver406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.120823284Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Mermaid 1 (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39617, job=Cloud, location=Arezzo, IT, pool=Mermaid 1, status=best" t=2024-05-29T13:44:15.12073454Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="instance=suwdc07tdswcldbs1001.tds.ecomm.local" t=2024-05-29T13:44:15.120716799Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="instance=suwdc07tdswcldbs1001.tds.ecomm.local" t=2024-05-29T13:44:15.120699398Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Mermaid (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39616, job=Cloud, location=Arezzo, IT, pool=Mermaid, status=best" t=2024-05-29T13:44:15.120704744Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.120707574Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.120660374Z caller=remote_instance_store.go:51 user=560104 slug=northwestnodes msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=560104 slug=northwestnodes instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.120597372Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=560104 slug=northwestnodes t=2024-05-29T13:44:15.120576272Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=CCIP" +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Lightning 1 (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39617, job=Cloud, location=Miami, US, pool=Lightning 1, status=best" t=2024-05-29T13:44:15.120663354Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=560104 slug=northwestnodes version=41 fingerprint=40aa40994e29b3e9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.120486569Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.115131168s EvaluationString:}]" duration=9.513237ms +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Lightning (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39616, job=Cloud, location=Miami, US, pool=Lightning, status=best" t=2024-05-29T13:44:15.12062281Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Lightning (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39616, job=Cloud, location=Miami, US, pool=Lightning, status=best" t=2024-05-29T13:44:15.120611078Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=125136 slug=arnzo1 instance= t=2024-05-29T13:44:15.120587623Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=125136 slug=arnzo1 instance= t=2024-05-29T13:44:15.120574758Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=538355 slug=flogic instance="__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510" t=2024-05-29T13:44:15.120513503Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Green (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39616, job=Cloud, location=Colchester, GB, pool=Green, status=best" t=2024-05-29T13:44:15.120535645Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Archive (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39618, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best" t=2024-05-29T13:44:15.120466767Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=538355 slug=flogic instance="__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510" t=2024-05-29T13:44:15.120504549Z level=debug msg="Setting next state" handler=resultAlerting +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Archive (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39618, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best" t=2024-05-29T13:44:15.12045234Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Amiga 1 (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, 
instance=185.91.219.250:39617, job=Cloud, location=Sheffield, GB, pool=Amiga 1, status=best" t=2024-05-29T13:44:15.120419858Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Amiga 1 (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39617, job=Cloud, location=Sheffield, GB, pool=Amiga 1, status=best" t=2024-05-29T13:44:15.120405823Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=538355 slug=flogic version=15 fingerprint=34e86ca21ec3aaa7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.120411605Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510 State:Alerting Error: Results:map[] Values:map[E:{Var:E Labels:__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510 Value:0xc0256ff518} F:{Var:F Labels:__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510 Value:0xc0256ff540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.120083247s EvaluationString:[ var='E' labels={__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510} value=21.221540849380197 ], [ var='F' labels={__name__=grafanacloud_org_logs_usage, cluster=prod-us-central-0, org_id=796510} value=1 ]}]" duration=26.077179ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Denver, country=United States, datacenter=PIA, environment=production, instance=181.41.206.16:9998, ip=181.41.206.16, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-denver.crt, role=vpn, server=denver431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.120479117Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Kusama Amiga (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39616, job=Cloud, location=Sheffield, GB, pool=Amiga, status=best" t=2024-05-29T13:44:15.12036931Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=893151 slug=cmtdsnp version=1 fingerprint=08c632df10052f3d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.120298118Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=quwdc07tdswcldbs1001.tds.ecomm.local State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=quwdc07tdswcldbs1001.tds.ecomm.local Value:0xc013233b00} B:{Var:B Labels:instance=quwdc07tdswcldbs1001.tds.ecomm.local Value:0xc013233b10} C:{Var:C Labels:instance=quwdc07tdswcldbs1001.tds.ecomm.local Value:0xc013233b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.119746136s EvaluationString:[ var='A' labels={instance=quwdc07tdswcldbs1001.tds.ecomm.local} value=50.84644433291726 ], [ var='B' labels={instance=quwdc07tdswcldbs1001.tds.ecomm.local} value=50.84644433291726 ], [ var='C' labels={instance=quwdc07tdswcldbs1001.tds.ecomm.local} value=0 ]} {Instance:instance=suwdc07tdswcadbs1001.tds.ecomm.local State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=suwdc07tdswcadbs1001.tds.ecomm.local Value:0xc013233b40} B:{Var:B Labels:instance=suwdc07tdswcadbs1001.tds.ecomm.local Value:0xc013233b50} C:{Var:C Labels:instance=suwdc07tdswcadbs1001.tds.ecomm.local Value:0xc013233b60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.119764036s EvaluationString:[ var='A' labels={instance=suwdc07tdswcadbs1001.tds.ecomm.local} value=52.89814736531391 ], [ var='B' labels={instance=suwdc07tdswcadbs1001.tds.ecomm.local} value=52.89814736531391 ], [ var='C' labels={instance=suwdc07tdswcadbs1001.tds.ecomm.local} value=0 ]} {Instance:instance=suwdc07tdswcldbs1001.tds.ecomm.local State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=suwdc07tdswcldbs1001.tds.ecomm.local Value:0xc013233ba0} B:{Var:B Labels:instance=suwdc07tdswcldbs1001.tds.ecomm.local Value:0xc013233b80} C:{Var:C Labels:instance=suwdc07tdswcldbs1001.tds.ecomm.local Value:0xc013233b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.119773927s EvaluationString:[ var='A' labels={instance=suwdc07tdswcldbs1001.tds.ecomm.local} value=25.238317482968597 ], [ var='B' labels={instance=suwdc07tdswcldbs1001.tds.ecomm.local} value=25.238317482968597 ], [ var='C' labels={instance=suwdc07tdswcldbs1001.tds.ecomm.local} value=0 ]} {Instance:instance=suwdc07tdswcllog1001.tds.ecomm.local State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=suwdc07tdswcllog1001.tds.ecomm.local Value:0xc013233bc0} B:{Var:B Labels:instance=suwdc07tdswcllog1001.tds.ecomm.local Value:0xc013233be0} C:{Var:C Labels:instance=suwdc07tdswcllog1001.tds.ecomm.local Value:0xc013233bf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.119785357s EvaluationString:[ var='A' labels={instance=suwdc07tdswcllog1001.tds.ecomm.local} value=23.128651946583545 ], [ var='B' labels={instance=suwdc07tdswcllog1001.tds.ecomm.local} value=23.128651946583545 ], [ var='C' labels={instance=suwdc07tdswcllog1001.tds.ecomm.local} value=0 ]}]" duration=13.964448ms +logger=ngalert.state.manager.persist user=349246 slug=metricgamingdev t=2024-05-29T13:44:15.120315844Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.scheduler user=530405 slug=zetetic version=90 fingerprint=dcf737778778ba7f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.120012095Z level=debug msg="Alert rule evaluated" results="[{Instance:alias=Kusama Amiga (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39616, job=Cloud, location=Sheffield, GB, pool=Amiga, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Amiga (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39616, job=Cloud, location=Sheffield, GB, pool=Amiga, status=best Value:0xc053f13ae8} C:{Var:C Labels:alias=Kusama Amiga (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39616, job=Cloud, location=Sheffield, GB, pool=Amiga, status=best Value:0xc053f139c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118718593s EvaluationString:[ var='B' labels={alias=Kusama Amiga (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39616, job=Cloud, location=Sheffield, GB, pool=Amiga, status=best} value=99.48717948717947 ], [ var='C' labels={alias=Kusama Amiga (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39616, job=Cloud, location=Sheffield, GB, pool=Amiga, status=best} value=0 ]} {Instance:alias=Kusama Amiga 1 (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39617, job=Cloud, location=Sheffield, GB, pool=Amiga 1, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Amiga 
1 (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39617, job=Cloud, location=Sheffield, GB, pool=Amiga 1, status=best Value:0xc053f13c88} C:{Var:C Labels:alias=Kusama Amiga 1 (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39617, job=Cloud, location=Sheffield, GB, pool=Amiga 1, status=best Value:0xc053f13d10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118733843s EvaluationString:[ var='B' labels={alias=Kusama Amiga 1 (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39617, job=Cloud, location=Sheffield, GB, pool=Amiga 1, status=best} value=99.48717948717947 ], [ var='C' labels={alias=Kusama Amiga 1 (HAH-SHD), chain=Kusama, exported_chain=ksmcc3, host=HA Hosting, instance=185.91.219.250:39617, job=Cloud, location=Sheffield, GB, pool=Amiga 1, status=best} value=0 ]} {Instance:alias=Kusama Archive (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39618, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Archive (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39618, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best Value:0xc053f13e98} C:{Var:C Labels:alias=Kusama Archive (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39618, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best Value:0xc053f13fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118742273s EvaluationString:[ var='B' labels={alias=Kusama Archive (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39618, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best} value=98.46153846153845 ], [ var='C' labels={alias=Kusama Archive (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39618, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best} value=0 ]} {Instance:alias=Kusama Green (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39616, job=Cloud, location=Colchester, GB, pool=Green, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Green (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39616, job=Cloud, location=Colchester, GB, pool=Green, status=best Value:0xc04a3f2328} C:{Var:C Labels:alias=Kusama Green (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39616, job=Cloud, location=Colchester, GB, pool=Green, status=best Value:0xc04a3f2870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118751899s EvaluationString:[ var='B' labels={alias=Kusama Green (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39616, job=Cloud, location=Colchester, GB, pool=Green, status=best} value=98.46153846153845 ], [ var='C' labels={alias=Kusama Green (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39616, job=Cloud, location=Colchester, GB, pool=Green, status=best} value=0 ]} {Instance:alias=Kusama Green 1 (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39617, job=Cloud, location=Colchester, GB, pool=Green 1, status=best 
State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Green 1 (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39617, job=Cloud, location=Colchester, GB, pool=Green 1, status=best Value:0xc04a3f3230} C:{Var:C Labels:alias=Kusama Green 1 (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39617, job=Cloud, location=Colchester, GB, pool=Green 1, status=best Value:0xc04a3f2dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118758756s EvaluationString:[ var='B' labels={alias=Kusama Green 1 (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39617, job=Cloud, location=Colchester, GB, pool=Green 1, status=best} value=99.48717948717947 ], [ var='C' labels={alias=Kusama Green 1 (VNM-CHG), chain=Kusama, exported_chain=ksmcc3, host=Purple IO, instance=83.219.58.214:39617, job=Cloud, location=Colchester, GB, pool=Green 1, status=best} value=0 ]} {Instance:alias=Kusama Lightning (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39616, job=Cloud, location=Miami, US, pool=Lightning, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Lightning (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39616, job=Cloud, location=Miami, US, pool=Lightning, status=best Value:0xc04a3f3780} C:{Var:C Labels:alias=Kusama Lightning (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39616, job=Cloud, location=Miami, US, pool=Lightning, status=best Value:0xc04a3f3b78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118764935s EvaluationString:[ var='B' labels={alias=Kusama Lightning (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39616, job=Cloud, location=Miami, US, pool=Lightning, status=best} value=99.48717948717947 ], [ var='C' labels={alias=Kusama Lightning (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39616, job=Cloud, location=Miami, US, pool=Lightning, status=best} value=0 ]} {Instance:alias=Kusama Lightning 1 (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39617, job=Cloud, location=Miami, US, pool=Lightning 1, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Lightning 1 (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39617, job=Cloud, location=Miami, US, pool=Lightning 1, status=best Value:0xc04c746540} C:{Var:C Labels:alias=Kusama Lightning 1 (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39617, job=Cloud, location=Miami, US, pool=Lightning 1, status=best Value:0xc04c747920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.11877066s EvaluationString:[ var='B' labels={alias=Kusama Lightning 1 (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39617, job=Cloud, location=Miami, US, pool=Lightning 1, status=best} value=98.46153846153845 ], [ var='C' labels={alias=Kusama Lightning 1 (RST-MIA), chain=Kusama, exported_chain=ksmcc3, host=ReliableSite, instance=209.222.97.206:39617, job=Cloud, location=Miami, US, pool=Lightning 1, status=best} value=0 ]} {Instance:alias=Kusama Mermaid (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39616, job=Cloud, location=Arezzo, IT, 
pool=Mermaid, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Mermaid (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39616, job=Cloud, location=Arezzo, IT, pool=Mermaid, status=best Value:0xc054bf40c0} C:{Var:C Labels:alias=Kusama Mermaid (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39616, job=Cloud, location=Arezzo, IT, pool=Mermaid, status=best Value:0xc054bf4148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118777459s EvaluationString:[ var='B' labels={alias=Kusama Mermaid (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39616, job=Cloud, location=Arezzo, IT, pool=Mermaid, status=best} value=98.46153846153845 ], [ var='C' labels={alias=Kusama Mermaid (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39616, job=Cloud, location=Arezzo, IT, pool=Mermaid, status=best} value=0 ]} {Instance:alias=Kusama Mermaid 1 (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39617, job=Cloud, location=Arezzo, IT, pool=Mermaid 1, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Mermaid 1 (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39617, job=Cloud, location=Arezzo, IT, pool=Mermaid 1, status=best Value:0xc054bf43f8} C:{Var:C Labels:alias=Kusama Mermaid 1 (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39617, job=Cloud, location=Arezzo, IT, pool=Mermaid 1, status=best Value:0xc054bf4300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118785903s EvaluationString:[ var='B' labels={alias=Kusama Mermaid 1 (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39617, job=Cloud, location=Arezzo, IT, pool=Mermaid 1, status=best} value=98.46153846153845 ], [ var='C' labels={alias=Kusama Mermaid 1 (ARU-AZO), chain=Kusama, exported_chain=ksmcc3, host=Aruba, instance=95.110.225.55:39617, job=Cloud, location=Arezzo, IT, pool=Mermaid 1, status=best} value=0 ]} {Instance:alias=Kusama Pruned (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39616, job=Higham Home, location=Higham Ferrers, GB, pool=Pruned, status=best State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:alias=Kusama Pruned (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39616, job=Higham Home, location=Higham Ferrers, GB, pool=Pruned, status=best Value:0xc054bf44e0} C:{Var:C Labels:alias=Kusama Pruned (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39616, job=Higham Home, location=Higham Ferrers, GB, pool=Pruned, status=best Value:0xc054bf4560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.118795743s EvaluationString:[ var='B' labels={alias=Kusama Pruned (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39616, job=Higham Home, location=Higham Ferrers, GB, pool=Pruned, status=best} value=97.24052631578947 ], [ var='C' labels={alias=Kusama Pruned (ZTC-HFH), chain=Kusama, exported_chain=ksmcc3, host=Zetetic Technologies, instance=192.168.1.36:39616, job=Higham Home, location=Higham Ferrers, GB, pool=Pruned, status=best} value=0 ]} {Instance:alias=Kusama Titan (OVH-LON), chain=Kusama, exported_chain=ksmcc3, host=OVH, instance=57.128.141.149:39616, job=Cloud, 
+[... long run of repetitive Grafana ngalert debug/info log entries (t≈2024-05-29T13:44:15Z): "Alert rule evaluated" result dumps, "Setting next state" / "Keeping state" / "Changing state" state-manager transitions, "calling SaveAlertInstance" remote-store calls, "sending alerts to grafana" sender messages, and "Evaluation result contains either reserved labels or labels declared in the rules" warnings, repeated across many tenants ...]
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=594394 slug=obh instance= t=2024-05-29T13:44:15.109301163Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=594394 slug=obh instance= t=2024-05-29T13:44:15.109286917Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.109199596Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.109077013Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=115220 slug=ufirst instance="datasource_uid=grafanacloud-graphite, ref_id=A,B,C,D,E" t=2024-05-29T13:44:15.109070803Z level=warn msg="Failed to take an image" dashboard=uexftseWz panel=4 error="rpc error: code = Code(422) desc = screenshots unavailable" +level=info ts=2024-05-29T13:44:15.109023596Z caller=remote_alert_sender.go:94 user=453497 slug=n444151595 host=n444151595-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.2.155:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f9ykHpo4k alerts=1 +level=info ts=2024-05-29T13:44:15.108953654Z caller=remote_alert_sender.go:94 user=607648 slug=kalandharfasil019 host=kalandharfasil019-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.184.31.42:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=2K4VC9fVz alerts=1 +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.108996668Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.108875688Z caller=remote_image_capturer.go:54 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="rendering alert image with grafana" +logger=ngalert.state.manager.persist user=607982 slug=soberdude t=2024-05-29T13:44:15.108884729Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=607982 slug=soberdude instance= t=2024-05-29T13:44:15.108869558Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager.persist user=941160 slug=uateu t=2024-05-29T13:44:15.108828081Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.452804ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Copenhagen, country=Denmark, datacenter=Glesys, environment=production, instance=188.126.94.34:9998, ip=188.126.94.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=copenhagen405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.10885174Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=607982 slug=soberdude version=2 fingerprint=38687125f76f4b58 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.108789148Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.506942ms +level=error ts=2024-05-29T13:44:15.108768307Z 
caller=remote_rule_evaluator.go:110 user=607982 slug=soberdude msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.10880814Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager.persist user=399183 slug=guidion t=2024-05-29T13:44:15.108760953Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=info ts=2024-05-29T13:44:15.108629734Z caller=remote_alert_sender.go:94 user=934316 slug=saadzafar005 host=saadzafar005-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.16.220.225:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cdmwezopqiz28a alerts=1 +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.108702866Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:15.108751053Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.108672297Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=399183 slug=guidion t=2024-05-29T13:44:15.108704815Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.108639318Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Copenhagen, country=Denmark, datacenter=Glesys, environment=production, instance=188.126.94.34:9998, ip=188.126.94.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/denmark-2.crt, role=streaming-optimized, server=copenhagen405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.108671602Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.108632467Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.108517363Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.10831245Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.108110819Z level=warn msg="Failed to take an image" dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable" +level=debug ts=2024-05-29T13:44:15.108066981Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=115220 slug=ufirst instance="datasource_uid=grafanacloud-graphite, ref_id=A,B,C,D,E" t=2024-05-29T13:44:15.108034367Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.10798549Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:15.108013779Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.107930834Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.107962558Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:15.10800882Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.107991157Z caller=remote_instance_store.go:51 user=540828 slug=finfoprod153 msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=06a9f561c3851a81 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.107875635Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.107519837s EvaluationString:}]" duration=413.156599ms +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Concord, country=United States, datacenter=DataPacket, environment=production, instance=84.239.41.2:9998, ip=84.239.41.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=newhampshire402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.107950154Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.107755184Z caller=remote_instance_store.go:51 user=310637 slug=notino msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2844-prod-eastus" t=2024-05-29T13:44:15.107813367Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 
instance="container=phoenix-operations-2843-prod-westus" t=2024-05-29T13:44:15.107797312Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Columbus, country=United States, datacenter=DataPacket, environment=production, instance=84.239.27.150:9998, ip=84.239.27.150, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-ohio-pf.crt, role=vpn, server=ohio404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.107750342Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2841-prod-westus" t=2024-05-29T13:44:15.107743031Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2841-prod-westus" t=2024-05-29T13:44:15.107734558Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.107606279Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2841-prod-eastus" t=2024-05-29T13:44:15.10771985Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2840-prod-westus" t=2024-05-29T13:44:15.107681516Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2839-prod-eastus" t=2024-05-29T13:44:15.107618294Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.107537472Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2834-prod-westus" t=2024-05-29T13:44:15.107545713Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2834-prod-westus" t=2024-05-29T13:44:15.107537016Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2834-prod-eastus" t=2024-05-29T13:44:15.107510483Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2827-prod-westus" t=2024-05-29T13:44:15.107305135Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2827-prod-westus" t=2024-05-29T13:44:15.107296028Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2825-prod-westus" t=2024-05-29T13:44:15.107248309Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Columbus, country=United States, datacenter=DataPacket, environment=production, instance=84.239.27.129:9998, ip=84.239.27.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=ohio403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.107225356Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.107173266Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2824-prod-westus" t=2024-05-29T13:44:15.107209021Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2824-prod-eastus" t=2024-05-29T13:44:15.107177824Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2823-prod-westus" t=2024-05-29T13:44:15.107153852Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2823-prod-eastus" t=2024-05-29T13:44:15.107138331Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2821-prod-westus" t=2024-05-29T13:44:15.107104309Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=129076 slug=marginalunit t=2024-05-29T13:44:15.107105734Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2821-prod-eastus" t=2024-05-29T13:44:15.107087502Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.107064616Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=312340 slug=lakefs 
t=2024-05-29T13:44:15.106994518Z level=debug msg="Saving alert states done" count=72 max_state_save_concurrency=1 duration=1.028067836s +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2819-prod-eastus" t=2024-05-29T13:44:15.107029511Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=934316 slug=saadzafar005 t=2024-05-29T13:44:15.106849058Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.385918ms +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2818-prod-westus" t=2024-05-29T13:44:15.106992938Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Columbia, country=United States, datacenter=DataPacket, environment=production, instance=84.239.7.129:9998, ip=84.239.7.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-south-carolina-pf.crt, role=vpn, server=southcarolina402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.107006015Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.106977388Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2818-prod-eastus" t=2024-05-29T13:44:15.106977637Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=770212 slug=trimarkprd t=2024-05-29T13:44:15.106887136Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=21.777371ms +level=debug ts=2024-05-29T13:44:15.106810585Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2817-prod-westus" t=2024-05-29T13:44:15.106947496Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2816-prod-westus" t=2024-05-29T13:44:15.106908638Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Columbia, country=United States, datacenter=DataPacket, environment=production, instance=84.239.7.129:9998, ip=84.239.7.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=southcarolina402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.106852595Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.106819953Z caller=remote_image_capturer.go:54 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="rendering alert image with grafana" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2815-prod-eastus" t=2024-05-29T13:44:15.106830717Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 
slug=finfoprod153 instance="container=phoenix-operations-2814-prod-westus" t=2024-05-29T13:44:15.106815254Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2814-prod-westus" t=2024-05-29T13:44:15.106806574Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2814-prod-eastus" t=2024-05-29T13:44:15.106786218Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.106740413Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +level=info ts=2024-05-29T13:44:15.106702612Z caller=remote_image_capturer.go:61 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2813-prod-eastus" t=2024-05-29T13:44:15.106707116Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2808-prod-westus" t=2024-05-29T13:44:15.106643078Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2808-prod-westus" t=2024-05-29T13:44:15.106635167Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.106567689Z caller=remote_instance_store.go:51 user=524410 slug=syso msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2807-prod-westus" t=2024-05-29T13:44:15.106593933Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.10657284Z caller=remote_instance_store.go:51 user=473395 slug=binaryronin msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2807-prod-westus" t=2024-05-29T13:44:15.106584238Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.106424403Z caller=remote_instance_store.go:51 user=907609 slug=calerts msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=473395 slug=binaryronin instance="instance=tachikoma:9090" t=2024-05-29T13:44:15.106525931Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.106537729Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.106504985Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2806-prod-eastus" t=2024-05-29T13:44:15.106512452Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=473395 slug=binaryronin instance="instance=section9:9090" t=2024-05-29T13:44:15.106488066Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 
instance="container=phoenix-operations-2805-prod-westus" t=2024-05-29T13:44:15.106486888Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Colombo, country=Sri Lanka, datacenter=M247, environment=production, instance=95.181.239.2:9998, ip=95.181.239.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=srilanka403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.106500757Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.106435009Z caller=remote_instance_store.go:51 user=714686 slug=pvtawscba msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=245291 slug=pismo version=470 fingerprint=def16f864ccaaac8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.106340368Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.10604218s EvaluationString:}]" duration=476.380528ms +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2804-prod-westus" t=2024-05-29T13:44:15.10642427Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.106381482Z caller=remote_rule_evaluator.go:193 user=893151 slug=cmtdsnp msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2803-prod-eastus" t=2024-05-29T13:44:15.106331462Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chisinau, country=Moldova, datacenter=Trabia, environment=production, instance=178.175.129.34:9998, ip=178.175.129.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chisinau401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.106331088Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2802-prod-westus" t=2024-05-29T13:44:15.106305947Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.10612618Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2802-prod-eastus" t=2024-05-29T13:44:15.106272355Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2801-prod-westus" t=2024-05-29T13:44:15.106256349Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2801-prod-westus" t=2024-05-29T13:44:15.10624755Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2801-prod-eastus" t=2024-05-29T13:44:15.106229612Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=714686 slug=pvtawscba t=2024-05-29T13:44:15.106219725Z level=debug msg="State manager 
processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.106218591Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError +logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:15.106207723Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2800-prod-westus" t=2024-05-29T13:44:15.10620488Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=ece2c74f7949d9fa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.106072505Z level=error msg="Failed to evaluate rule" error="failed to build query 'Front Field Temp': data source not found" duration=4.937973ms +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2800-prod-westus" t=2024-05-29T13:44:15.106196447Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2800-prod-eastus" t=2024-05-29T13:44:15.106178904Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=115314 slug=aperturecode instance= t=2024-05-29T13:44:15.106114435Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2705-prod-westus" t=2024-05-29T13:44:15.106144477Z level=debug msg="Setting next state" handler=resultNormal +level=error ts=2024-05-29T13:44:15.105975278Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'Front Field Temp': data source not found" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2705-prod-eastus" t=2024-05-29T13:44:15.106128742Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=115314 slug=aperturecode t=2024-05-29T13:44:15.106068049Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2704-prod-eastus" t=2024-05-29T13:44:15.106067613Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.106016654Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2702-prod-westus" t=2024-05-29T13:44:15.106039288Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2702-prod-eastus" t=2024-05-29T13:44:15.106000375Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2701-prod-eastus" t=2024-05-29T13:44:15.105943267Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.105925012Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.105834025Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2699-prod-westus" t=2024-05-29T13:44:15.105865317Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=702631 slug=hrtconditionmonitoring t=2024-05-29T13:44:15.105775062Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.943551ms +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2698-prod-westus" t=2024-05-29T13:44:15.105819653Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.105782646Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2698-prod-westus" t=2024-05-29T13:44:15.105811302Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2698-prod-eastus" t=2024-05-29T13:44:15.105785601Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.105730157Z caller=remote_instance_store.go:51 user=716527 slug=newpigqa msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chisinau, country=Moldova, datacenter=Trabia, environment=production, instance=178.175.128.34:9998, ip=178.175.128.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/md.crt, role=vpn, server=chisinau402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.105762268Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=536824 slug=forgerockit t=2024-05-29T13:44:15.105735031Z level=debug msg="Saving alert states" count=49 max_state_save_concurrency=1 +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chisinau, country=Moldova, datacenter=Trabia, environment=production, instance=178.175.128.34:9998, ip=178.175.128.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/md.crt, role=vpn, server=chisinau402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.10574851Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2697-prod-eastus" t=2024-05-29T13:44:15.105734738Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.105630904Z caller=remote_instance_store.go:51 user=829352 slug=unfnbonp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2696-prod-westus" t=2024-05-29T13:44:15.105697012Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2696-prod-eastus" t=2024-05-29T13:44:15.105670611Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 
instance="container=phoenix-operations-2695-prod-westus" t=2024-05-29T13:44:15.105651963Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=716527 slug=newpigqa version=1 fingerprint=411e989ba8addf76 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.105564014Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.105388894s EvaluationString:}]" duration=6.562651ms +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2695-prod-eastus" t=2024-05-29T13:44:15.105618179Z level=debug msg="Setting next state" handler=resultNormal +level=info ts=2024-05-29T13:44:15.10563661Z caller=remote_alert_sender.go:94 user=916854 slug=lalamuru host=lalamuru-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.64.46:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adjx5lplujh8ga alerts=1 +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=services-scraper-s09t, device=/dev/sda1, env=services, fstype=vfat, instance=services-scraper-s09t, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.105610182Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=886980 slug=althq t=2024-05-29T13:44:15.105570293Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.036037ms +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2694-prod-westus" t=2024-05-29T13:44:15.105590521Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2694-prod-eastus" t=2024-05-29T13:44:15.105544401Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2693-prod-eastus" t=2024-05-29T13:44:15.105490508Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.105451461Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.105477898Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.105425579Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2692-prod-eastus" t=2024-05-29T13:44:15.105439657Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2690-prod-westus" t=2024-05-29T13:44:15.105325786Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=services-rsyslog-sq64, device=/dev/sda1, env=services, fstype=vfat, instance=services-rsyslog-sq64, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.105383125Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2688-prod-westus" t=2024-05-29T13:44:15.105275384Z level=debug msg="Setting 
next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=services-rsyslog-sq64, device=/dev/sda1, env=services, fstype=vfat, instance=services-rsyslog-sq64, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.105373973Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2687-prod-westus" t=2024-05-29T13:44:15.105229143Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.105243145Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2685-prod-westus" t=2024-05-29T13:44:15.105123348Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2685-prod-eastus" t=2024-05-29T13:44:15.105096873Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2684-prod-eastus" t=2024-05-29T13:44:15.10504358Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.105192555Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.105185514Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2684-prod-eastus" t=2024-05-29T13:44:15.105033858Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=radius-singapore-4159, device=/dev/sda2, env=services, fstype=xfs, instance=radius-singapore-4159, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.105100888Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=radius-singapore-4159, device=/dev/sda1, env=services, fstype=vfat, instance=radius-singapore-4159, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.105011768Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2683-prod-eastus" t=2024-05-29T13:44:15.104977503Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.166.51:9998, ip=181.214.166.51, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.104918305Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=radius-sanfrancisco-3fgz, device=/dev/sda2, env=services, fstype=xfs, 
instance=radius-sanfrancisco-3fgz, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.10491862Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.1048909Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2681-prod-eastus" t=2024-05-29T13:44:15.104862096Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2679-prod-westus" t=2024-05-29T13:44:15.10478989Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2679-prod-eastus" t=2024-05-29T13:44:15.104760456Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.104682298Z caller=remote_instance_store.go:51 user=797387 slug=roadrunnerdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.104698018Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager.persist user=797387 slug=roadrunnerdev t=2024-05-29T13:44:15.104626118Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=797387 slug=roadrunnerdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.104611787Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=797387 slug=roadrunnerdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.104601517Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.104633982Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2669-prod-westus" t=2024-05-29T13:44:15.104575035Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.10456783Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.166.4:9998, ip=181.214.166.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.104548437Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2668-prod-eastus" t=2024-05-29T13:44:15.104530174Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2668-prod-eastus" t=2024-05-29T13:44:15.104524568Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 
instance="container=phoenix-operations-2660-prod-westus" t=2024-05-29T13:44:15.104506882Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=radius-grenoble-7c6z, device=/dev/sda1, env=services, fstype=vfat, instance=radius-grenoble-7c6z, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.104487307Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2660-prod-eastus" t=2024-05-29T13:44:15.10446574Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=radius-grenoble-7c6z, device=/dev/sda1, env=services, fstype=vfat, instance=radius-grenoble-7c6z, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.104439615Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=612525 slug=adleyeview instance="instance=api.telemetry.confluent.cloud:443, job=Confluent Cloud, kafka_id=lkc-gkvr3r" t=2024-05-29T13:44:15.104430094Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.104365484Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.104360715Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1081 Sos.xxx - Mobile Popunder" t=2024-05-29T13:44:15.104416417Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.166.221:9998, ip=181.214.166.221, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.10439637Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1080 Sos.xxx Mobile Banner 300x100" t=2024-05-29T13:44:15.104397379Z level=debug msg="Keeping state" state=Normal +level=info ts=2024-05-29T13:44:15.104320554Z caller=remote_image_capturer.go:61 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" +level=debug ts=2024-05-29T13:44:15.104295428Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.104355536Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1078 Sos.xxx Banner 300x250" t=2024-05-29T13:44:15.104331249Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=612525 slug=adleyeview t=2024-05-29T13:44:15.104269492Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2657-prod-westus" t=2024-05-29T13:44:15.10430151Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2656-prod-westus" t=2024-05-29T13:44:15.104245159Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.104232192Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2652-prod-westus" t=2024-05-29T13:44:15.104202746Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1023 GayPorn.Video - Desktop Footer #2 (300x250)" t=2024-05-29T13:44:15.104208131Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=287424 slug=ercande instance="datasource_uid=uMOTrHb7k, ref_id=A" t=2024-05-29T13:44:15.104129766Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2651-prod-westus" t=2024-05-29T13:44:15.104134173Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1021 GayPorn.Video - Mobile Header (300x100)" t=2024-05-29T13:44:15.104142737Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=radius-bristol-w8q2, device=/dev/sda1, env=services, fstype=vfat, instance=radius-bristol-w8q2, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.10410134Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2651-prod-westus" t=2024-05-29T13:44:15.104125227Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2651-prod-eastus" t=2024-05-29T13:44:15.104107989Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2651-prod-eastus" t=2024-05-29T13:44:15.104098192Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1019 GayPorn.Video - NTVC (300x250)" t=2024-05-29T13:44:15.104071931Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2649-prod-westus" t=2024-05-29T13:44:15.104080568Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=287424 slug=ercande instance="datasource_uid=uMOTrHb7k, ref_id=A" t=2024-05-29T13:44:15.104096414Z level=debug msg="Setting next state" handler=resultNoData 
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.104024999Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.103984254Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.10397978Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2641-prod-westus" t=2024-05-29T13:44:15.103978046Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2641-prod-eastus" t=2024-05-29T13:44:15.103961919Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.103927133Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2629-prod-westus" t=2024-05-29T13:44:15.103945587Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#995 Bull - FOOTER 1" t=2024-05-29T13:44:15.103906582Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2629-prod-eastus" t=2024-05-29T13:44:15.103922069Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#994 Bull - ITVB" t=2024-05-29T13:44:15.103890049Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#993 Bull - ITVA" t=2024-05-29T13:44:15.103846131Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#992 Bull - NTVC" t=2024-05-29T13:44:15.10383683Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2622-prod-eastus" t=2024-05-29T13:44:15.103890926Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.166.190:9998, ip=181.214.166.190, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.103897398Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#991 Bull - NTVB" t=2024-05-29T13:44:15.103820816Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2621-prod-eastus" t=2024-05-29T13:44:15.103855909Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.103781274Z caller=remote_instance_store.go:51 user=612695 slug=ocipprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2619-prod-eastus" t=2024-05-29T13:44:15.103771933Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2342-prod-westus" t=2024-05-29T13:44:15.103754653Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2342-prod-westus" t=2024-05-29T13:44:15.103748029Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.166.144:9998, ip=181.214.166.144, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.103684016Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#908 Gay_Mobile Header (300x100)" t=2024-05-29T13:44:15.103675018Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#908 Gay_Mobile Header (300x100)" t=2024-05-29T13:44:15.103667011Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2342-prod-eastus" t=2024-05-29T13:44:15.103716866Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2335-prod-westus" t=2024-05-29T13:44:15.103692757Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#906 Gay_Desktop Footer (300x250)" t=2024-05-29T13:44:15.103633122Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2335-prod-eastus" t=2024-05-29T13:44:15.103665783Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#861 Pornogram.tv - Desktop Footer 3" t=2024-05-29T13:44:15.103616677Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#861 Pornogram.tv - Desktop Footer 3" t=2024-05-29T13:44:15.103610986Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#857 Pornogram.tv - Interstitial" t=2024-05-29T13:44:15.103586732Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2200-prod-westus" t=2024-05-29T13:44:15.103632953Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#857 Pornogram.tv - Interstitial" t=2024-05-29T13:44:15.103581064Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#854 Pornogram.tv - UVB Desktop" t=2024-05-29T13:44:15.103563437Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#853 Pornogram.tv ITVB" t=2024-05-29T13:44:15.103552435Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2145-prod-westus" t=2024-05-29T13:44:15.103581912Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#852 Pornogram.tv - Desktop Footer" t=2024-05-29T13:44:15.103518655Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2145-prod-eastus" t=2024-05-29T13:44:15.103549559Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#851 Pornogram.tv - NTVC" t=2024-05-29T13:44:15.103501354Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.103474694Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.103469473Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.103463214Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:15.103481777Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#844 Pornogram.tv - NTVB" t=2024-05-29T13:44:15.103438756Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2040-prod-westus" t=2024-05-29T13:44:15.103424434Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2040-prod-eastus" t=2024-05-29T13:44:15.103389053Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=puppetdb-jenkins-3r5p, device=/dev/sda2, env=services, fstype=xfs, instance=puppetdb-jenkins-3r5p, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.103401058Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#842 Pornogram.tv - Popunder" t=2024-05-29T13:44:15.103404003Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-2006-prod-westus" t=2024-05-29T13:44:15.103354833Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#830 HDPornVideos.tv - ITVB" t=2024-05-29T13:44:15.103393697Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#830 HDPornVideos.tv - ITVB" t=2024-05-29T13:44:15.103387525Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#826 General Banner 300x250" t=2024-05-29T13:44:15.103340051Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#824 Nsfw Video Slider" t=2024-05-29T13:44:15.103314456Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1600-prod-eastus" t=2024-05-29T13:44:15.103277779Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#806 HotPornTubes VideoSlider" t=2024-05-29T13:44:15.103258243Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1295-prod-westus" t=2024-05-29T13:44:15.103233354Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#806 HotPornTubes VideoSlider" t=2024-05-29T13:44:15.103249154Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1295-prod-westus" t=2024-05-29T13:44:15.103223037Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=puppetdb-h51c, device=/dev/sda2, env=services, fstype=xfs, instance=puppetdb-h51c, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.103154771Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1295-prod-eastus" t=2024-05-29T13:44:15.103184311Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=puppetdb-h51c, device=/dev/sda2, env=services, fstype=xfs, instance=puppetdb-h51c, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.103147611Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=puppetdb-h51c, device=/dev/sda1, env=services, fstype=vfat, instance=puppetdb-h51c, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.10303524Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#781 BigFuck Network - Desktop Footer 1 (300x250)" t=2024-05-29T13:44:15.10315189Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=puppetdb-h51c, device=/dev/sda1, env=services, fstype=vfat, instance=puppetdb-h51c, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.103018719Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1253-prod-westus" t=2024-05-29T13:44:15.103148611Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.165.98:9998, ip=181.214.165.98, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.103086317Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#773 HDPornVideos.tv - Desktop + Tablet Footer 3" t=2024-05-29T13:44:15.103097932Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#772 HDPornVideos.tv - Desktop + Tablet Footer 2" t=2024-05-29T13:44:15.103073196Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1247-prod-eastus" t=2024-05-29T13:44:15.103067666Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.103042452Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.102963632Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#770 CJ Network - Desktop + Tablet Footer 3 (300x250)" t=2024-05-29T13:44:15.103022586Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#768 CJ Network - Desktop + Tablet Footer 1 (300x250)" t=2024-05-29T13:44:15.102973039Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1150-prod-eastus" t=2024-05-29T13:44:15.102993483Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=615095 slug=gsaxena2 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.102927402Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=615095 slug=gsaxena2 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.102889588Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1135-prod-eastus" t=2024-05-29T13:44:15.102941623Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#764 [HDPorn Network] Native Ad Unit for Internal Ads [Desktop]" t=2024-05-29T13:44:15.102905653Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1125-prod-westus" t=2024-05-29T13:44:15.102913033Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#763 [HDPorn Network] Native Ad Unit for Internal Ads [Mobile]" t=2024-05-29T13:44:15.102870764Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.10286048Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#761 Shemale Network - Desktop Footer #3" t=2024-05-29T13:44:15.102843016Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#760 Shemale Network - Desktop Footer #2" t=2024-05-29T13:44:15.102808136Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1125-prod-eastus" t=2024-05-29T13:44:15.102876166Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1103-prod-westus" t=2024-05-29T13:44:15.102860263Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#759 Shemale Network - Desktop Footer #1" t=2024-05-29T13:44:15.102785782Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1103-prod-eastus" t=2024-05-29T13:44:15.102833396Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#756 4k - VideoSlider" t=2024-05-29T13:44:15.102757981Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1099-prod-eastus" t=2024-05-29T13:44:15.102777593Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.102731726Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=664100 slug=lson instance= t=2024-05-29T13:44:15.102618438Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1090-prod-westus" t=2024-05-29T13:44:15.102741993Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#754 4k - VastOverEmbed" t=2024-05-29T13:44:15.102731416Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.165.51:9998, ip=181.214.165.51, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.102711074Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1090-prod-eastus" t=2024-05-29T13:44:15.102707642Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(apex.US_West.players.pc_steam.mh448980.serverstats) Query" t=2024-05-29T13:44:15.102645214Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.102370104Z caller=remote_instance_store.go:51 user=764616 slug=jarif81231 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#751 BigRoundAss - VideoSlider" t=2024-05-29T13:44:15.102686141Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1078-prod-westus" t=2024-05-29T13:44:15.102681802Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=it-jenkins-8wtb, device=/dev/sda2, env=services, fstype=xfs, instance=it-jenkins-8wtb, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.102616478Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=664100 slug=lson instance= t=2024-05-29T13:44:15.102567896Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.102503022Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#627 HDPornVideos.TV - Pre-Roll" t=2024-05-29T13:44:15.102614629Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=it-jenkins-8wtb, device=/dev/sda2, env=services, fstype=xfs, instance=it-jenkins-8wtb, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.102600031Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1063-prod-eastus" t=2024-05-29T13:44:15.102611212Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#625 CJ Network - ITVA (300x250)" t=2024-05-29T13:44:15.102588479Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1063-prod-eastus" t=2024-05-29T13:44:15.102602447Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.102515013Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#624 CJ Network - Pre-Roll" t=2024-05-29T13:44:15.102562311Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#623 4k - IM Mobile" t=2024-05-29T13:44:15.102533321Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.102409301Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1060-prod-westus" t=2024-05-29T13:44:15.102529445Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1060-prod-westus" t=2024-05-29T13:44:15.102518703Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.102507982Z level=debug msg="Execution no data state is Alerting" 
handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1060-prod-eastus" t=2024-05-29T13:44:15.1024798Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#585 [CJ Network] Native Ad Unit for Internal Ads [Desktop]" t=2024-05-29T13:44:15.102479564Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#579 4K Main Network - Popunder" t=2024-05-29T13:44:15.102462006Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1058-prod-westus" t=2024-05-29T13:44:15.102452957Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1058-prod-eastus" t=2024-05-29T13:44:15.102435342Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.102477949Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1058-prod-eastus" t=2024-05-29T13:44:15.102425681Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#578 4K Main Network - Mobile Footer 2" t=2024-05-29T13:44:15.102432762Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=664100 slug=lson t=2024-05-29T13:44:15.102366676Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#578 4K Main Network - Mobile Footer 2" t=2024-05-29T13:44:15.10242647Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.10238025Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#577 4K Main Network - Mobile Footer 1" t=2024-05-29T13:44:15.102399423Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#576 4K Main Network - IM Mobile (300x100)" t=2024-05-29T13:44:15.102373511Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.165.4:9998, ip=181.214.165.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.102320805Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=it-jenkins-8wtb, device=/dev/mapper/jenkins_vg-jenkins_lv, env=services, fstype=ext4, instance=it-jenkins-8wtb, job=integrations/node_exporter, mountpoint=/var/lib/jenkins" t=2024-05-29T13:44:15.102327321Z level=debug msg="Setting 
next state" handler=resultNormal +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-operations-1015-prod-eastus" t=2024-05-29T13:44:15.102195656Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.102309726Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#574 4K Main Network - Mobile Header" t=2024-05-29T13:44:15.102327093Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.102234094Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=540828 slug=finfoprod153 instance="container=phoenix-eventengine-2669-prod-eastus" t=2024-05-29T13:44:15.102127271Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#571 4K Main Network - Footer 2" t=2024-05-29T13:44:15.102248652Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#571 4K Main Network - Footer 2" t=2024-05-29T13:44:15.102239123Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#570 4K Main Network - Footer 1" t=2024-05-29T13:44:15.102223391Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#569 4K Main Network - ITVB" t=2024-05-29T13:44:15.102195091Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#569 4K Main Network - ITVB" t=2024-05-29T13:44:15.102186223Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-singapore-secondary-0r3x, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-singapore-secondary-0r3x, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.10210872Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.165.190:9998, ip=181.214.165.190, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.102128807Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#568 4K Main Network - ITVA" t=2024-05-29T13:44:15.102166505Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=540828 slug=finfoprod153 version=1 fingerprint=a6b75c055de16b7f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.09999632Z level=debug msg="Alert rule evaluated" results="[{Instance:container=phoenix-eventengine-2669-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-eventengine-2669-prod-eastus Value:0xc012ebbca0} C:{Var:C Labels:container=phoenix-eventengine-2669-prod-eastus Value:0xc012ebbca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095360661s EvaluationString:[ var='A' 
labels={container=phoenix-eventengine-2669-prod-eastus} value=29.12133891213389 ], [ var='C' labels={container=phoenix-eventengine-2669-prod-eastus} value=0 ]} {Instance:container=phoenix-eventengine-2669-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-eventengine-2669-prod-westus Value:0xc012ebbcf0} C:{Var:C Labels:container=phoenix-eventengine-2669-prod-westus Value:0xc012ebbcf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095376225s EvaluationString:[ var='A' labels={container=phoenix-eventengine-2669-prod-westus} value=0 ], [ var='C' labels={container=phoenix-eventengine-2669-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1015-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1015-prod-eastus Value:0xc012ebbd48} C:{Var:C Labels:container=phoenix-operations-1015-prod-eastus Value:0xc012ebbd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095382213s EvaluationString:[ var='A' labels={container=phoenix-operations-1015-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1015-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1015-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1015-prod-westus Value:0xc012ebbd80} C:{Var:C Labels:container=phoenix-operations-1015-prod-westus Value:0xc012ebbd88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095461276s EvaluationString:[ var='A' labels={container=phoenix-operations-1015-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1015-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1019-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1019-prod-eastus Value:0xc012ebbdd0} C:{Var:C Labels:container=phoenix-operations-1019-prod-eastus Value:0xc012ebbdd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095466063s EvaluationString:[ var='A' labels={container=phoenix-operations-1019-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1019-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1019-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1019-prod-westus Value:0xc012ebbe20} C:{Var:C Labels:container=phoenix-operations-1019-prod-westus Value:0xc012ebbe28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095471073s EvaluationString:[ var='A' labels={container=phoenix-operations-1019-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1019-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1041-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1041-prod-eastus Value:0xc012ebbe60} C:{Var:C Labels:container=phoenix-operations-1041-prod-eastus Value:0xc012ebbe68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095475786s EvaluationString:[ var='A' labels={container=phoenix-operations-1041-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1041-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1041-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1041-prod-westus Value:0xc012ebbea0} C:{Var:C Labels:container=phoenix-operations-1041-prod-westus Value:0xc012ebbea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.095480949s EvaluationString:[ var='A' labels={container=phoenix-operations-1041-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1041-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1058-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1058-prod-eastus Value:0xc012ebbee0} C:{Var:C Labels:container=phoenix-operations-1058-prod-eastus Value:0xc012ebbee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095484632s EvaluationString:[ var='A' labels={container=phoenix-operations-1058-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1058-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1058-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1058-prod-westus Value:0xc012ebbf20} C:{Var:C Labels:container=phoenix-operations-1058-prod-westus Value:0xc012ebbf28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095488481s EvaluationString:[ var='A' labels={container=phoenix-operations-1058-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1058-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1060-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1060-prod-eastus Value:0xc012ebbf68} C:{Var:C Labels:container=phoenix-operations-1060-prod-eastus Value:0xc012ebbf60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095494344s EvaluationString:[ var='A' labels={container=phoenix-operations-1060-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1060-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1060-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1060-prod-westus Value:0xc012ebbfa0} C:{Var:C Labels:container=phoenix-operations-1060-prod-westus Value:0xc012ebbfa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095499426s EvaluationString:[ var='A' labels={container=phoenix-operations-1060-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1060-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1062-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1062-prod-eastus Value:0xc047b16020} C:{Var:C Labels:container=phoenix-operations-1062-prod-eastus Value:0xc047b16028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095503351s EvaluationString:[ var='A' labels={container=phoenix-operations-1062-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1062-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1062-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1062-prod-westus Value:0xc047b160f0} C:{Var:C Labels:container=phoenix-operations-1062-prod-westus Value:0xc047b160f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095507385s EvaluationString:[ var='A' labels={container=phoenix-operations-1062-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1062-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1063-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1063-prod-eastus Value:0xc047b161c8} C:{Var:C Labels:container=phoenix-operations-1063-prod-eastus Value:0xc047b161c0}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:5.095511883s EvaluationString:[ var='A' labels={container=phoenix-operations-1063-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1063-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1063-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1063-prod-westus Value:0xc047b16330} C:{Var:C Labels:container=phoenix-operations-1063-prod-westus Value:0xc047b16338}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095516257s EvaluationString:[ var='A' labels={container=phoenix-operations-1063-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1063-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1078-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1078-prod-eastus Value:0xc047b16490} C:{Var:C Labels:container=phoenix-operations-1078-prod-eastus Value:0xc047b16498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095520035s EvaluationString:[ var='A' labels={container=phoenix-operations-1078-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1078-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1078-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1078-prod-westus Value:0xc047b165d0} C:{Var:C Labels:container=phoenix-operations-1078-prod-westus Value:0xc047b165d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095524837s EvaluationString:[ var='A' labels={container=phoenix-operations-1078-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1078-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1090-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1090-prod-eastus Value:0xc047b16700} C:{Var:C Labels:container=phoenix-operations-1090-prod-eastus Value:0xc047b16708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095529293s EvaluationString:[ var='A' labels={container=phoenix-operations-1090-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1090-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1090-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1090-prod-westus Value:0xc047b167f8} C:{Var:C Labels:container=phoenix-operations-1090-prod-westus Value:0xc047b167f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095534351s EvaluationString:[ var='A' labels={container=phoenix-operations-1090-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1090-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1099-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1099-prod-eastus Value:0xc047b16880} C:{Var:C Labels:container=phoenix-operations-1099-prod-eastus Value:0xc047b16888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095538053s EvaluationString:[ var='A' labels={container=phoenix-operations-1099-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1099-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1099-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1099-prod-westus Value:0xc047b168e0} C:{Var:C Labels:container=phoenix-operations-1099-prod-westus Value:0xc047b168e8}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:5.095541696s EvaluationString:[ var='A' labels={container=phoenix-operations-1099-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1099-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1103-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1103-prod-eastus Value:0xc047b169b0} C:{Var:C Labels:container=phoenix-operations-1103-prod-eastus Value:0xc047b169b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095545147s EvaluationString:[ var='A' labels={container=phoenix-operations-1103-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1103-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1103-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1103-prod-westus Value:0xc047b16b28} C:{Var:C Labels:container=phoenix-operations-1103-prod-westus Value:0xc047b16b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095549068s EvaluationString:[ var='A' labels={container=phoenix-operations-1103-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1103-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1125-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1125-prod-eastus Value:0xc047b16c98} C:{Var:C Labels:container=phoenix-operations-1125-prod-eastus Value:0xc047b16c90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09555413s EvaluationString:[ var='A' labels={container=phoenix-operations-1125-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1125-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1125-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1125-prod-westus Value:0xc047b16e10} C:{Var:C Labels:container=phoenix-operations-1125-prod-westus Value:0xc047b16e18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095558116s EvaluationString:[ var='A' labels={container=phoenix-operations-1125-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1125-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1135-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1135-prod-eastus Value:0xc047b16e80} C:{Var:C Labels:container=phoenix-operations-1135-prod-eastus Value:0xc047b16e88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095562355s EvaluationString:[ var='A' labels={container=phoenix-operations-1135-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1135-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1135-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1135-prod-westus Value:0xc047b16f28} C:{Var:C Labels:container=phoenix-operations-1135-prod-westus Value:0xc047b16f20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095566268s EvaluationString:[ var='A' labels={container=phoenix-operations-1135-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1135-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1150-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1150-prod-eastus Value:0xc047b16f80} C:{Var:C Labels:container=phoenix-operations-1150-prod-eastus Value:0xc047b16f88}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095570416s EvaluationString:[ var='A' labels={container=phoenix-operations-1150-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1150-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1150-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1150-prod-westus Value:0xc047b17070} C:{Var:C Labels:container=phoenix-operations-1150-prod-westus Value:0xc047b17078}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095574703s EvaluationString:[ var='A' labels={container=phoenix-operations-1150-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1150-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1247-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1247-prod-eastus Value:0xc047b170c8} C:{Var:C Labels:container=phoenix-operations-1247-prod-eastus Value:0xc047b170c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095580003s EvaluationString:[ var='A' labels={container=phoenix-operations-1247-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1247-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1247-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1247-prod-westus Value:0xc047b171c0} C:{Var:C Labels:container=phoenix-operations-1247-prod-westus Value:0xc047b171c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095584882s EvaluationString:[ var='A' labels={container=phoenix-operations-1247-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1247-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1253-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1253-prod-eastus Value:0xc047b172a0} C:{Var:C Labels:container=phoenix-operations-1253-prod-eastus Value:0xc047b172a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095589448s EvaluationString:[ var='A' labels={container=phoenix-operations-1253-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1253-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1253-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1253-prod-westus Value:0xc047b17388} C:{Var:C Labels:container=phoenix-operations-1253-prod-westus Value:0xc047b17380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095594017s EvaluationString:[ var='A' labels={container=phoenix-operations-1253-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1253-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1295-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1295-prod-eastus Value:0xc047b173c0} C:{Var:C Labels:container=phoenix-operations-1295-prod-eastus Value:0xc047b173c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095598179s EvaluationString:[ var='A' labels={container=phoenix-operations-1295-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1295-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1295-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1295-prod-westus Value:0xc047b174e8} C:{Var:C Labels:container=phoenix-operations-1295-prod-westus 
Value:0xc047b174e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09560272s EvaluationString:[ var='A' labels={container=phoenix-operations-1295-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1295-prod-westus} value=0 ]} {Instance:container=phoenix-operations-1600-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1600-prod-eastus Value:0xc047b17648} C:{Var:C Labels:container=phoenix-operations-1600-prod-eastus Value:0xc047b17640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095606897s EvaluationString:[ var='A' labels={container=phoenix-operations-1600-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-1600-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-1600-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-1600-prod-westus Value:0xc047b17778} C:{Var:C Labels:container=phoenix-operations-1600-prod-westus Value:0xc047b17770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095610491s EvaluationString:[ var='A' labels={container=phoenix-operations-1600-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-1600-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2006-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2006-prod-eastus Value:0xc047b178d8} C:{Var:C Labels:container=phoenix-operations-2006-prod-eastus Value:0xc047b178d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095614706s EvaluationString:[ var='A' labels={container=phoenix-operations-2006-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2006-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2006-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2006-prod-westus Value:0xc047b17a10} C:{Var:C Labels:container=phoenix-operations-2006-prod-westus Value:0xc047b17a18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095618526s EvaluationString:[ var='A' labels={container=phoenix-operations-2006-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2006-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2040-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2040-prod-eastus Value:0xc047b17b20} C:{Var:C Labels:container=phoenix-operations-2040-prod-eastus Value:0xc047b17b28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095623325s EvaluationString:[ var='A' labels={container=phoenix-operations-2040-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2040-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2040-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2040-prod-westus Value:0xc047b17bc0} C:{Var:C Labels:container=phoenix-operations-2040-prod-westus Value:0xc047b17bc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095627628s EvaluationString:[ var='A' labels={container=phoenix-operations-2040-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2040-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2132-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2132-prod-eastus Value:0xc047b17c90} C:{Var:C 
Labels:container=phoenix-operations-2132-prod-eastus Value:0xc047b17c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09563202s EvaluationString:[ var='A' labels={container=phoenix-operations-2132-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2132-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2132-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2132-prod-westus Value:0xc047b17cf0} C:{Var:C Labels:container=phoenix-operations-2132-prod-westus Value:0xc047b17cf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095638778s EvaluationString:[ var='A' labels={container=phoenix-operations-2132-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2132-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2145-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2145-prod-eastus Value:0xc047b17da8} C:{Var:C Labels:container=phoenix-operations-2145-prod-eastus Value:0xc047b17da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095642786s EvaluationString:[ var='A' labels={container=phoenix-operations-2145-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2145-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2145-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2145-prod-westus Value:0xc047b17de0} C:{Var:C Labels:container=phoenix-operations-2145-prod-westus Value:0xc047b17de8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095646923s EvaluationString:[ var='A' labels={container=phoenix-operations-2145-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2145-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2200-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2200-prod-eastus Value:0xc047b17f40} C:{Var:C Labels:container=phoenix-operations-2200-prod-eastus Value:0xc047b17f48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095651213s EvaluationString:[ var='A' labels={container=phoenix-operations-2200-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2200-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2200-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2200-prod-westus Value:0xc02bf8e008} C:{Var:C Labels:container=phoenix-operations-2200-prod-westus Value:0xc02bf8e000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095656356s EvaluationString:[ var='A' labels={container=phoenix-operations-2200-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2200-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2335-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2335-prod-eastus Value:0xc02bf8e040} C:{Var:C Labels:container=phoenix-operations-2335-prod-eastus Value:0xc02bf8e048}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.095662161s EvaluationString:[ var='A' labels={container=phoenix-operations-2335-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2335-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2335-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2335-prod-westus Value:0xc02bf8e0d8} 
[... hundreds of near-identical alert instance state entries elided. Each entry follows the same pattern — {Instance:container=phoenix-operations-<id>-prod-<eastus|westus> State:Normal Error: Results:map[] Values:map[A:{...} C:{...}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:~5.096s EvaluationString:[ var='A' labels={container=...} value=0 ], [ var='C' labels={container=...} value=0 ]} — covering containers phoenix-operations-2335 through phoenix-operations-2839 in both the eastus and westus regions, all in State:Normal with A=0 and C=0 ...]
Value:0xc0059ebc60} C:{Var:C Labels:container=phoenix-operations-2839-prod-westus Value:0xc0059ebc68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096322123s EvaluationString:[ var='A' labels={container=phoenix-operations-2839-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2839-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2840-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2840-prod-eastus Value:0xc0059ebca0} C:{Var:C Labels:container=phoenix-operations-2840-prod-eastus Value:0xc0059ebca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096325771s EvaluationString:[ var='A' labels={container=phoenix-operations-2840-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2840-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2840-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2840-prod-westus Value:0xc0059ebd20} C:{Var:C Labels:container=phoenix-operations-2840-prod-westus Value:0xc0059ebd28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096329981s EvaluationString:[ var='A' labels={container=phoenix-operations-2840-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2840-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2841-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2841-prod-eastus Value:0xc0059ebd60} C:{Var:C Labels:container=phoenix-operations-2841-prod-eastus Value:0xc0059ebd68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096333984s EvaluationString:[ var='A' labels={container=phoenix-operations-2841-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2841-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2841-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2841-prod-westus Value:0xc0059ebdd8} C:{Var:C Labels:container=phoenix-operations-2841-prod-westus Value:0xc0059ebdd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096337663s EvaluationString:[ var='A' labels={container=phoenix-operations-2841-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2841-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2843-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2843-prod-eastus Value:0xc0059ebe18} C:{Var:C Labels:container=phoenix-operations-2843-prod-eastus Value:0xc0059ebe10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096341511s EvaluationString:[ var='A' labels={container=phoenix-operations-2843-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2843-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2843-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2843-prod-westus Value:0xc0059ebe60} C:{Var:C Labels:container=phoenix-operations-2843-prod-westus Value:0xc0059ebe68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096346468s EvaluationString:[ var='A' labels={container=phoenix-operations-2843-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2843-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2844-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:container=phoenix-operations-2844-prod-eastus Value:0xc0059ebea8} C:{Var:C Labels:container=phoenix-operations-2844-prod-eastus Value:0xc0059ebea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096350659s EvaluationString:[ var='A' labels={container=phoenix-operations-2844-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2844-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2844-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2844-prod-westus Value:0xc03a004020} C:{Var:C Labels:container=phoenix-operations-2844-prod-westus Value:0xc03a004028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09636713s EvaluationString:[ var='A' labels={container=phoenix-operations-2844-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2844-prod-westus} value=0 ]} {Instance:container=phoenix-operations-2846-prod-eastus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2846-prod-eastus Value:0xc03a004060} C:{Var:C Labels:container=phoenix-operations-2846-prod-eastus Value:0xc03a004068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096371019s EvaluationString:[ var='A' labels={container=phoenix-operations-2846-prod-eastus} value=0 ], [ var='C' labels={container=phoenix-operations-2846-prod-eastus} value=0 ]} {Instance:container=phoenix-operations-2846-prod-westus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=phoenix-operations-2846-prod-westus Value:0xc03a0040a0} C:{Var:C Labels:container=phoenix-operations-2846-prod-westus Value:0xc03a0040a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096374909s EvaluationString:[ var='A' labels={container=phoenix-operations-2846-prod-westus} value=0 ], [ var='C' labels={container=phoenix-operations-2846-prod-westus} value=0 ]}]" duration=38.723735ms +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#567 4K Main Network - NTVB" t=2024-05-29T13:44:15.102130648Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.102097388Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#566 4K Main Network - NTVA" t=2024-05-29T13:44:15.102114419Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-singapore-primary-hvs8, device=/dev/sda2, env=services, fstype=xfs, instance=internal-dns-singapore-primary-hvs8, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.102004278Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#339 HDPornVideos.tv - NTVB" t=2024-05-29T13:44:15.101991116Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.165.190:9998, ip=181.214.165.190, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.101947355Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#336 4k - Mobile In Thumbs" t=2024-05-29T13:44:15.101945783Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-singapore-primary-hvs8, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-singapore-primary-hvs8, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.101923142Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#336 4k - Mobile In Thumbs" t=2024-05-29T13:44:15.101936214Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#328 BigRoundAss - Inside Gallery #1" t=2024-05-29T13:44:15.101851679Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.101798792Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-sanfrancisco-secondary-hmql, device=/dev/sda2, env=services, fstype=xfs, instance=internal-dns-sanfrancisco-secondary-hmql, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.101824671Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#318 PornPics.com - Footer 300x250" t=2024-05-29T13:44:15.10181643Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#317 TeenPornXXX - Index Cube #4" t=2024-05-29T13:44:15.101798581Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=797387 slug=roadrunnerdev t=2024-05-29T13:44:15.101626917Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=17.956826ms +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#316 TeenPornXXX - Index Cube #3" t=2024-05-29T13:44:15.101769847Z level=debug msg="Setting next state" 
handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.101698783Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.101691033Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#315 TeenPornXXX - Index Cube #2" t=2024-05-29T13:44:15.101729446Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.101686183Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.101670683Z level=warn msg="Failed to take an image" dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#312 TeenPornXXX - Index Cube #1" t=2024-05-29T13:44:15.101685129Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#311 BigRoundAss - Gallery Cube #3" t=2024-05-29T13:44:15.101656502Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=755975 slug=franprd t=2024-05-29T13:44:15.101609796Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#310 BigRoundAss - Gallery Cube #2" t=2024-05-29T13:44:15.101615617Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=755975 slug=franprd version=1 fingerprint=8d06cd11c7ca1fff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.101438214Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.101278007s EvaluationString:}]" duration=7.762482ms +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.101547452Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager.persist user=357734 slug=potloc t=2024-05-29T13:44:15.101403267Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#293 CJ Network - Interstitial" t=2024-05-29T13:44:15.101521321Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.101475149Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-sanfrancisco-primary-6mzv, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-sanfrancisco-primary-6mzv, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.10151789Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#291 CJ Network - UVB Mobile (300x100)" t=2024-05-29T13:44:15.101468661Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=357734 slug=potloc t=2024-05-29T13:44:15.101316805Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#290 CJ Network - UVB Desktop (728x90)" t=2024-05-29T13:44:15.101433644Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.164.98:9998, ip=181.214.164.98, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.10141165Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#289 CJ Network - NTVC (300x250)" t=2024-05-29T13:44:15.10140534Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=713300 slug=tpcnanonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.101353122Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-grenoble-secondary-q4wt, device=/dev/sda2, env=services, fstype=xfs, instance=internal-dns-grenoble-secondary-q4wt, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.101428518Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#288 CJ Network - NTVB (300x250)" t=2024-05-29T13:44:15.101387479Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#287 CJ Network - NTVA (300x250)" t=2024-05-29T13:44:15.101360591Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#286 CJ Network - Mobile Footer (300x250)" t=2024-05-29T13:44:15.101321067Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:15.101354279Z level=debug msg="Saving alert states" count=11 
max_state_save_concurrency=1 +logger=ngalert.scheduler user=713300 slug=tpcnanonprod version=1 fingerprint=2c67eb48a513b59e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.101247371Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.100969453s EvaluationString:}]" duration=9.160686ms +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#280 4k - Mobile Footer (300x250)" t=2024-05-29T13:44:15.101265203Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-grenoble-secondary-q4wt, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-grenoble-secondary-q4wt, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.10133888Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#277 Gay Network (SPONSORS) - UVB Mobile (300x100)" t=2024-05-29T13:44:15.101238155Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-grenoble-secondary-q4wt, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-grenoble-secondary-q4wt, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.101328324Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#265 4k - Popunder" t=2024-05-29T13:44:15.101184275Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.101185483Z caller=remote_instance_store.go:51 user=776563 slug=eagleeye4els msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-notification-ocrolus, clientid=connector-producer-notification-ocrolus-0, cluster=production01, environment=prod, instance=10.160.122.144:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-notification-ocrolus-865d6b9894-sv4vz, pod_template_hash=865d6b9894, topic=prod_notification_public_debezium_heartbeat" t=2024-05-29T13:44:15.101279206Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-hierarchy-ocrolus, clientid=connector-producer-hierarchy-ocrolus-0, cluster=production01, environment=prod, instance=10.160.65.4:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-hierarchy-ocrolus-9f4d49c5c-fql4v, pod_template_hash=9f4d49c5c, topic=prod_hierarchy_public_debezium_heartbeat" t=2024-05-29T13:44:15.101249054Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-hierarchy-ocrolus, clientid=connector-producer-hierarchy-ocrolus-0, cluster=production01, environment=prod, instance=10.160.65.4:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-hierarchy-ocrolus-9f4d49c5c-fql4v, pod_template_hash=9f4d49c5c, topic=prod_hierarchy_public_debezium_heartbeat" t=2024-05-29T13:44:15.101235349Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=61472 slug=glasslewis instance= t=2024-05-29T13:44:15.101122655Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-encompass-ocrolus, clientid=connector-producer-encompass-ocrolus-0, cluster=production01, environment=prod, instance=10.160.125.144:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-encompass-ocrolus-7c4d657ddc-dlmv2, pod_template_hash=7c4d657ddc, topic=prod_encompass_public_debezium_heartbeat" t=2024-05-29T13:44:15.101180652Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#258 4k - Mobile Header (300x100)" t=2024-05-29T13:44:15.100982921Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#257 SendVid - Mobile Adhession (300x100)" t=2024-05-29T13:44:15.100955149Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-grenoble-primary-1kdc, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-grenoble-primary-1kdc, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.101144168Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#250 4K - VideoSlider" t=2024-05-29T13:44:15.100833479Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-detect-ocrolus, clientid=connector-producer-detect-ocrolus-0, cluster=production01, environment=prod, instance=10.160.127.222:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-detect-ocrolus-65546956ff-lb59b, pod_template_hash=65546956ff, topic=prod_detect_public_debezium_heartbeat" t=2024-05-29T13:44:15.101121519Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#249 Shemale Network - VideoSlider" t=2024-05-29T13:44:15.100815723Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.101084431Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.101002095Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-curanopk-ocrolus, clientid=connector-producer-curanopk-ocrolus-0, cluster=production01, environment=prod, instance=10.160.103.206:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-curanopk-ocrolus-78bc5ffbb7-7j2nj, pod_template_hash=78bc5ffbb7, topic=prod_cura_public_debezium_heartbeat" t=2024-05-29T13:44:15.101079409Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#246 4K - Pre-Roll Spot" t=2024-05-29T13:44:15.100768818Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-cura-ocrolus, clientid=connector-producer-cura-ocrolus-0, cluster=production01, environment=prod, instance=10.160.100.211:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-cura-ocrolus-69fb6665fd-lft8g, pod_template_hash=69fb6665fd, topic=prod_cura_public_debezium_heartbeat" t=2024-05-29T13:44:15.101030103Z level=debug msg="Keeping 
state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#208 ImageBam - Desktop Header C (300x250)" t=2024-05-29T13:44:15.100748387Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=237629 slug=ocrolus instance="app=kafka-connect-cura-ocrolus, clientid=connector-producer-cura-ocrolus-0, cluster=production01, environment=prod, instance=10.160.100.211:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-cura-ocrolus-69fb6665fd-lft8g, pod_template_hash=69fb6665fd, topic=prod_cura_public_debezium_heartbeat" t=2024-05-29T13:44:15.101015474Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.100926763Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-bristol-secondary-32gf, device=/dev/sda2, env=services, fstype=xfs, instance=internal-dns-bristol-secondary-32gf, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.100974628Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.100918934Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.100943481Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.100982189Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#204 ImageBam - Mobile Footer (300x250)" t=2024-05-29T13:44:15.100707824Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.100892594Z level=warn msg="Failed to take an image" dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#200 AllYourTubes - Mobile Embed (300x250)" t=2024-05-29T13:44:15.100669555Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#199 AllYourTubes - Mobile Footer (300x250)" t=2024-05-29T13:44:15.100654995Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#198 AllYourTubes - Mobile Header (300x100)" t=2024-05-29T13:44:15.100644048Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#197 AllYourTubes - Mobile Popunder" t=2024-05-29T13:44:15.100629482Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#193 AllYourTubes - Desktop Embed (300x250)" t=2024-05-29T13:44:15.100610115Z level=debug msg="Setting next state" handler=resultNormal +level=error ts=2024-05-29T13:44:15.100801298Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" +logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=4f78365d67346214 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.100829785Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=5.292691ms +level=debug ts=2024-05-29T13:44:15.100750333Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.164.51:9998, ip=181.214.164.51, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.100820282Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#156 Gay Network - Video Slider" t=2024-05-29T13:44:15.100547586Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#153 BigFuck Network - Native Ad" t=2024-05-29T13:44:15.100529823Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#118 Shemale Network - Popunder" t=2024-05-29T13:44:15.100520457Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-bristol-primary-vx6l, 
device=/dev/sda2, env=services, fstype=xfs, instance=internal-dns-bristol-primary-vx6l, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.100692745Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#118 Shemale Network - Popunder" t=2024-05-29T13:44:15.100514689Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#117 Gay Network - Popunder" t=2024-05-29T13:44:15.100503964Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#117 Gay Network - Popunder" t=2024-05-29T13:44:15.100497468Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#116 Shemale Network - Interstitial" t=2024-05-29T13:44:15.100481716Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.164.4:9998, ip=181.214.164.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.100653122Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.100624523Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#109 Shemale Network - Mobile Header (300x100)" t=2024-05-29T13:44:15.100424849Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-bristol-primary-vx6l, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-bristol-primary-vx6l, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.100560307Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#106 Shemale Network - NTVA (300x250)" t=2024-05-29T13:44:15.100380377Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#105 Gay Network - Pre-Roll" t=2024-05-29T13:44:15.100361512Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#104 Gay Network - UVB Mobile (300x100)" t=2024-05-29T13:44:15.100352635Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#101 Gay Network - Mobile Footer (300x250)" t=2024-05-29T13:44:15.100306273Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.164.4:9998, ip=181.214.164.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, 
server=chicago411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.100483208Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=196413 slug=form3production t=2024-05-29T13:44:15.100339237Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#98 Gay Network - Desktop Footer #1 (300x250)" t=2024-05-29T13:44:15.100259616Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#97 Gay Network - Mobile Header (300x100)" t=2024-05-29T13:44:15.100243668Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#96 Gay Network - NTVC (300x250)" t=2024-05-29T13:44:15.100227473Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#94 Gay Network - NTVB (300x250)" t=2024-05-29T13:44:15.100211039Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#94 Gay Network - NTVB (300x250)" t=2024-05-29T13:44:15.100205387Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#88 BigFuck Network - Interstitial" t=2024-05-29T13:44:15.100167239Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-austin-secondary-jftz, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-austin-secondary-jftz, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.100278321Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-austin-secondary-jftz, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-austin-secondary-jftz, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.100260918Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#8 BigFuck Network - UVB Desktop (728x90)" t=2024-05-29T13:44:15.10010424Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#6 BigFuck Network - ITVA (300x250)" t=2024-05-29T13:44:15.100094973Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#5 BigFuck Network - NTVC (300x250)" t=2024-05-29T13:44:15.100080332Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#2 BigFuck Network - Mobile Header (300x100)" t=2024-05-29T13:44:15.100024424Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1006 Bull - Interstitial" t=2024-05-29T13:44:15.099998711Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1002 Bull - Popunder" t=2024-05-29T13:44:15.099983502Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1002 Bull - Popunder" t=2024-05-29T13:44:15.099976822Z level=debug 
msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#298 Shemale Network - Menu Item #2 (Dating)" t=2024-05-29T13:44:15.099967268Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#298 Shemale Network - Menu Item #2 (Dating)" t=2024-05-29T13:44:15.09996176Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#996 Bull - FOOTER 2" t=2024-05-29T13:44:15.09993393Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.09978129Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#996 Bull - FOOTER 2" t=2024-05-29T13:44:15.099928257Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#278 Shemale Network - Tablet Footer #2 (300x250)" t=2024-05-29T13:44:15.09986216Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.100038593Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +level=debug ts=2024-05-29T13:44:15.100006558Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#113 Shemale Network - UVB Mobile (300x100)" t=2024-05-29T13:44:15.099848658Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.100030724Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:15.099992287Z caller=remote_rule_evaluator.go:193 user=356716 slug=molecule msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#420 PlanetSuzy - Desktop Header B" t=2024-05-29T13:44:15.099829179Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.099746104Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#419 PlanetSuzy - Desktop Header A" t=2024-05-29T13:44:15.099819465Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#419 PlanetSuzy - Desktop Header A" t=2024-05-29T13:44:15.099813342Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.099733531Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.099719909Z level=debug msg="Setting next state" 
handler=resultNoData +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#346 HDPornVideos.tv - ITVA" t=2024-05-29T13:44:15.099801169Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#346 HDPornVideos.tv - ITVA" t=2024-05-29T13:44:15.099792209Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=internal-dns-austin-primary-qclc, device=/dev/sda1, env=services, fstype=vfat, instance=internal-dns-austin-primary-qclc, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.099972583Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1042 Banner Player Video" t=2024-05-29T13:44:15.099775406Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1003 Bull - Native" t=2024-05-29T13:44:15.099759057Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1033 GayPorn.Video (SPONSORS) - UVB Mobile (300x100)" t=2024-05-29T13:44:15.099746776Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1033 GayPorn.Video (SPONSORS) - UVB Mobile (300x100)" t=2024-05-29T13:44:15.099740701Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#997 Bull - FOOTER 3" t=2024-05-29T13:44:15.099725785Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=PIA, environment=production, instance=181.214.164.144:9998, ip=181.214.164.144, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.099926316Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=843304 slug=ppcgroup instance= t=2024-05-29T13:44:15.099882844Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#423 PlanetSuzy - Desktop Footer B" t=2024-05-29T13:44:15.099701103Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=642786 slug=sophoscomnsg t=2024-05-29T13:44:15.09982691Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.492401ms +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#422 PlanetSuzy - Desktop Footer A" t=2024-05-29T13:44:15.099679578Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1004 Bull - Exit Widget" t=2024-05-29T13:44:15.099651273Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=843304 slug=ppcgroup t=2024-05-29T13:44:15.099835743Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=bastion-eu-6863, device=/dev/sda2, env=services, fstype=xfs, 
instance=bastion-eu-6863, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.099834298Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.099726993Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=895137 slug=uid2 t=2024-05-29T13:44:15.099782261Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.35145ms +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1036 GayPorn.Video - Menu Item #3 (HD Porn)" t=2024-05-29T13:44:15.09958227Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#1036 GayPorn.Video - Menu Item #3 (HD Porn)" t=2024-05-29T13:44:15.099578278Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#971 Rat.xxx Mobile Popunder" t=2024-05-29T13:44:15.099499057Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#971 Rat.xxx Mobile Popunder" t=2024-05-29T13:44:15.099491163Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.099636825Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=502468 slug=gmawater instance="premium_deal_id=#859 Pornogram.tv - VideoSlider" t=2024-05-29T13:44:15.09947141Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=bastion-eu-6863, device=/dev/sda1, env=services, fstype=vfat, instance=bastion-eu-6863, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.099678528Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=536824 slug=forgerockit instance="agent_hostname=bastion-eu-6863, device=/dev/sda1, env=services, fstype=vfat, instance=bastion-eu-6863, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.099656293Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:15.099569646Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.099552409Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.099573467Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=536824 slug=forgerockit t=2024-05-29T13:44:15.099482612Z level=debug msg="State manager processing evaluation results" resultCount=49 +logger=ngalert.scheduler user=502468 slug=gmawater version=14 fingerprint=9ec2fdb78dc43927 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.097819748Z level=debug msg="Alert rule evaluated" results="[{Instance:premium_deal_id=#859 Pornogram.tv - VideoSlider State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#859 Pornogram.tv - VideoSlider Value:0xc006ce6b70} main_query:{Var:main_query Labels:premium_deal_id=#859 Pornogram.tv - VideoSlider Value:0xc006ce6b78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.092585375s EvaluationString:[ var='C' labels={premium_deal_id=#859 Pornogram.tv - VideoSlider} value=0 ], [ 
[... several hundred further evaluation entries elided; each follows the same pattern `{Instance:premium_deal_id=#N <deal name> State:Normal Error: Results:map[] Values:map[C:{...} main_query:{...}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09...s EvaluationString:[ var='C' labels={premium_deal_id=#N <deal name>} value=0 ], [ var='main_query' labels={premium_deal_id=#N <deal name>} value=<float> ]}` with State:Normal and var='C' value=0 throughout ...]
Values:map[C:{Var:C Labels:premium_deal_id=#570 4K Main Network - Footer 1 Value:0xc0010fa778} main_query:{Var:main_query Labels:premium_deal_id=#570 4K Main Network - Footer 1 Value:0xc0010fa7d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093419995s EvaluationString:[ var='C' labels={premium_deal_id=#570 4K Main Network - Footer 1} value=0 ], [ var='main_query' labels={premium_deal_id=#570 4K Main Network - Footer 1} value=16.00873119078581 ]} {Instance:premium_deal_id=#571 4K Main Network - Footer 2 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#571 4K Main Network - Footer 2 Value:0xc0010fa828} main_query:{Var:main_query Labels:premium_deal_id=#571 4K Main Network - Footer 2 Value:0xc0010fa820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093426234s EvaluationString:[ var='C' labels={premium_deal_id=#571 4K Main Network - Footer 2} value=0 ], [ var='main_query' labels={premium_deal_id=#571 4K Main Network - Footer 2} value=17.04629675379219 ]} {Instance:premium_deal_id=#572 4K Main Network - Footer 3 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#572 4K Main Network - Footer 3 Value:0xc0010fa898} main_query:{Var:main_query Labels:premium_deal_id=#572 4K Main Network - Footer 3 Value:0xc0010fa890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093433907s EvaluationString:[ var='C' labels={premium_deal_id=#572 4K Main Network - Footer 3} value=0 ], [ var='main_query' labels={premium_deal_id=#572 4K Main Network - Footer 3} value=17.414234511008697 ]} {Instance:premium_deal_id=#573 4K Main Network - Footer 4 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#573 4K Main Network - Footer 4 Value:0xc0010fa908} main_query:{Var:main_query Labels:premium_deal_id=#573 4K Main Network - Footer 4 Value:0xc0010fa900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093440194s EvaluationString:[ var='C' labels={premium_deal_id=#573 4K Main Network - Footer 4} value=0 ], [ var='main_query' labels={premium_deal_id=#573 4K Main Network - Footer 4} value=17.17862935928629 ]} {Instance:premium_deal_id=#574 4K Main Network - Mobile Header State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#574 4K Main Network - Mobile Header Value:0xc0010fa938} main_query:{Var:main_query Labels:premium_deal_id=#574 4K Main Network - Mobile Header Value:0xc0010fa980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093446936s EvaluationString:[ var='C' labels={premium_deal_id=#574 4K Main Network - Mobile Header} value=0 ], [ var='main_query' labels={premium_deal_id=#574 4K Main Network - Mobile Header} value=2.6804695443201743 ]} {Instance:premium_deal_id=#575 4K Main Network - IM Desktop State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#575 4K Main Network - IM Desktop Value:0xc0010fa9b0} main_query:{Var:main_query Labels:premium_deal_id=#575 4K Main Network - IM Desktop Value:0xc0010fa9b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093453221s EvaluationString:[ var='C' labels={premium_deal_id=#575 4K Main Network - IM Desktop} value=0 ], [ var='main_query' labels={premium_deal_id=#575 4K Main Network - IM Desktop} value=9.786700125470515 ]} {Instance:premium_deal_id=#576 4K Main Network - IM Mobile (300x100) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#576 4K Main Network - IM Mobile (300x100) Value:0xc0010faa18} main_query:{Var:main_query 
Labels:premium_deal_id=#576 4K Main Network - IM Mobile (300x100) Value:0xc0010faa70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093459027s EvaluationString:[ var='C' labels={premium_deal_id=#576 4K Main Network - IM Mobile (300x100)} value=0 ], [ var='main_query' labels={premium_deal_id=#576 4K Main Network - IM Mobile (300x100)} value=-2.745518432493599 ]} {Instance:premium_deal_id=#577 4K Main Network - Mobile Footer 1 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#577 4K Main Network - Mobile Footer 1 Value:0xc0010faac8} main_query:{Var:main_query Labels:premium_deal_id=#577 4K Main Network - Mobile Footer 1 Value:0xc0010faac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093465315s EvaluationString:[ var='C' labels={premium_deal_id=#577 4K Main Network - Mobile Footer 1} value=0 ], [ var='main_query' labels={premium_deal_id=#577 4K Main Network - Mobile Footer 1} value=5.064471026875883 ]} {Instance:premium_deal_id=#578 4K Main Network - Mobile Footer 2 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#578 4K Main Network - Mobile Footer 2 Value:0xc0010fab38} main_query:{Var:main_query Labels:premium_deal_id=#578 4K Main Network - Mobile Footer 2 Value:0xc0010fab30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093471082s EvaluationString:[ var='C' labels={premium_deal_id=#578 4K Main Network - Mobile Footer 2} value=0 ], [ var='main_query' labels={premium_deal_id=#578 4K Main Network - Mobile Footer 2} value=3.957578892912572 ]} {Instance:premium_deal_id=#579 4K Main Network - Popunder State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#579 4K Main Network - Popunder Value:0xc0010fab68} main_query:{Var:main_query Labels:premium_deal_id=#579 4K Main Network - Popunder Value:0xc0010fabb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093476879s EvaluationString:[ var='C' labels={premium_deal_id=#579 4K Main Network - Popunder} value=0 ], [ var='main_query' labels={premium_deal_id=#579 4K Main Network - Popunder} value=0.30674846625766694 ]} {Instance:premium_deal_id=#585 [CJ Network] Native Ad Unit for Internal Ads [Desktop] State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#585 [CJ Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fabf0} main_query:{Var:main_query Labels:premium_deal_id=#585 [CJ Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fabf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093483735s EvaluationString:[ var='C' labels={premium_deal_id=#585 [CJ Network] Native Ad Unit for Internal Ads [Desktop]} value=0 ], [ var='main_query' labels={premium_deal_id=#585 [CJ Network] Native Ad Unit for Internal Ads [Desktop]} value=1.4424177669235982 ]} {Instance:premium_deal_id=#622 4K - IM Desktop State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#622 4K - IM Desktop Value:0xc0010fac48} main_query:{Var:main_query Labels:premium_deal_id=#622 4K - IM Desktop Value:0xc0010faca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093491258s EvaluationString:[ var='C' labels={premium_deal_id=#622 4K - IM Desktop} value=0 ], [ var='main_query' labels={premium_deal_id=#622 4K - IM Desktop} value=5.250596658711215 ]} {Instance:premium_deal_id=#623 4k - IM Mobile State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#623 4k - IM Mobile Value:0xc0010facf8} main_query:{Var:main_query 
Labels:premium_deal_id=#623 4k - IM Mobile Value:0xc0010facf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093497359s EvaluationString:[ var='C' labels={premium_deal_id=#623 4k - IM Mobile} value=0 ], [ var='main_query' labels={premium_deal_id=#623 4k - IM Mobile} value=-1.3061547595487832 ]} {Instance:premium_deal_id=#624 CJ Network - Pre-Roll State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#624 CJ Network - Pre-Roll Value:0xc0010fad38} main_query:{Var:main_query Labels:premium_deal_id=#624 CJ Network - Pre-Roll Value:0xc0010fad80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093503468s EvaluationString:[ var='C' labels={premium_deal_id=#624 CJ Network - Pre-Roll} value=0 ], [ var='main_query' labels={premium_deal_id=#624 CJ Network - Pre-Roll} value=1.5686274509803866 ]} {Instance:premium_deal_id=#625 CJ Network - ITVA (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#625 CJ Network - ITVA (300x250) Value:0xc0010fadb0} main_query:{Var:main_query Labels:premium_deal_id=#625 CJ Network - ITVA (300x250) Value:0xc0010fadb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093511292s EvaluationString:[ var='C' labels={premium_deal_id=#625 CJ Network - ITVA (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#625 CJ Network - ITVA (300x250)} value=11.098623208766512 ]} {Instance:premium_deal_id=#627 HDPornVideos.TV - Pre-Roll State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#627 HDPornVideos.TV - Pre-Roll Value:0xc0010fae18} main_query:{Var:main_query Labels:premium_deal_id=#627 HDPornVideos.TV - Pre-Roll Value:0xc0010fae60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093516843s EvaluationString:[ var='C' labels={premium_deal_id=#627 HDPornVideos.TV - Pre-Roll} value=0 ], [ var='main_query' labels={premium_deal_id=#627 HDPornVideos.TV - Pre-Roll} value=0 ]} {Instance:premium_deal_id=#741 HDPornVideos.TV - Interstitial State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#741 HDPornVideos.TV - Interstitial Value:0xc0010faea0} main_query:{Var:main_query Labels:premium_deal_id=#741 HDPornVideos.TV - Interstitial Value:0xc0010faea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093522413s EvaluationString:[ var='C' labels={premium_deal_id=#741 HDPornVideos.TV - Interstitial} value=0 ], [ var='main_query' labels={premium_deal_id=#741 HDPornVideos.TV - Interstitial} value=0 ]} {Instance:premium_deal_id=#751 BigRoundAss - VideoSlider State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#751 BigRoundAss - VideoSlider Value:0xc0010faf08} main_query:{Var:main_query Labels:premium_deal_id=#751 BigRoundAss - VideoSlider Value:0xc0010faf50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093529819s EvaluationString:[ var='C' labels={premium_deal_id=#751 BigRoundAss - VideoSlider} value=0 ], [ var='main_query' labels={premium_deal_id=#751 BigRoundAss - VideoSlider} value=37.48726655348047 ]} {Instance:premium_deal_id=#752 BigRoundAss - Interstitial State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#752 BigRoundAss - Interstitial Value:0xc0010fafa8} main_query:{Var:main_query Labels:premium_deal_id=#752 BigRoundAss - Interstitial Value:0xc0010fafa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093536332s EvaluationString:[ var='C' labels={premium_deal_id=#752 BigRoundAss - Interstitial} value=0 ], [ 
var='main_query' labels={premium_deal_id=#752 BigRoundAss - Interstitial} value=29.295774647887328 ]} {Instance:premium_deal_id=#754 4k - VastOverEmbed State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#754 4k - VastOverEmbed Value:0xc0010fb018} main_query:{Var:main_query Labels:premium_deal_id=#754 4k - VastOverEmbed Value:0xc0010fb010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09354201s EvaluationString:[ var='C' labels={premium_deal_id=#754 4k - VastOverEmbed} value=0 ], [ var='main_query' labels={premium_deal_id=#754 4k - VastOverEmbed} value=0 ]} {Instance:premium_deal_id=#756 4k - VideoSlider State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#756 4k - VideoSlider Value:0xc0010fb048} main_query:{Var:main_query Labels:premium_deal_id=#756 4k - VideoSlider Value:0xc0010fb0a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093639985s EvaluationString:[ var='C' labels={premium_deal_id=#756 4k - VideoSlider} value=0 ], [ var='main_query' labels={premium_deal_id=#756 4k - VideoSlider} value=-7.635467980295562 ]} {Instance:premium_deal_id=#759 Shemale Network - Desktop Footer #1 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#759 Shemale Network - Desktop Footer #1 Value:0xc0010fb0d0} main_query:{Var:main_query Labels:premium_deal_id=#759 Shemale Network - Desktop Footer #1 Value:0xc0010fb0d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093648021s EvaluationString:[ var='C' labels={premium_deal_id=#759 Shemale Network - Desktop Footer #1} value=0 ], [ var='main_query' labels={premium_deal_id=#759 Shemale Network - Desktop Footer #1} value=10.690665154950874 ]} {Instance:premium_deal_id=#760 Shemale Network - Desktop Footer #2 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#760 Shemale Network - Desktop Footer #2 Value:0xc0010fb138} main_query:{Var:main_query Labels:premium_deal_id=#760 Shemale Network - Desktop Footer #2 Value:0xc0010fb180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093655366s EvaluationString:[ var='C' labels={premium_deal_id=#760 Shemale Network - Desktop Footer #2} value=0 ], [ var='main_query' labels={premium_deal_id=#760 Shemale Network - Desktop Footer #2} value=11.27198805177565 ]} {Instance:premium_deal_id=#761 Shemale Network - Desktop Footer #3 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#761 Shemale Network - Desktop Footer #3 Value:0xc0010fb1b0} main_query:{Var:main_query Labels:premium_deal_id=#761 Shemale Network - Desktop Footer #3 Value:0xc0010fb1b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093661509s EvaluationString:[ var='C' labels={premium_deal_id=#761 Shemale Network - Desktop Footer #3} value=0 ], [ var='main_query' labels={premium_deal_id=#761 Shemale Network - Desktop Footer #3} value=11.108809148918542 ]} {Instance:premium_deal_id=#762 [CJ Network] Native Ad Unit for Internal Ads [Mobile] State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#762 [CJ Network] Native Ad Unit for Internal Ads [Mobile] Value:0xc0010fb208} main_query:{Var:main_query Labels:premium_deal_id=#762 [CJ Network] Native Ad Unit for Internal Ads [Mobile] Value:0xc0010fb260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093668023s EvaluationString:[ var='C' labels={premium_deal_id=#762 [CJ Network] Native Ad Unit for Internal Ads [Mobile]} value=0 ], [ var='main_query' 
labels={premium_deal_id=#762 [CJ Network] Native Ad Unit for Internal Ads [Mobile]} value=8.19499894935911 ]} {Instance:premium_deal_id=#763 [HDPorn Network] Native Ad Unit for Internal Ads [Mobile] State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#763 [HDPorn Network] Native Ad Unit for Internal Ads [Mobile] Value:0xc0010fb290} main_query:{Var:main_query Labels:premium_deal_id=#763 [HDPorn Network] Native Ad Unit for Internal Ads [Mobile] Value:0xc0010fb298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093673089s EvaluationString:[ var='C' labels={premium_deal_id=#763 [HDPorn Network] Native Ad Unit for Internal Ads [Mobile]} value=0 ], [ var='main_query' labels={premium_deal_id=#763 [HDPorn Network] Native Ad Unit for Internal Ads [Mobile]} value=-39.741219963031426 ]} {Instance:premium_deal_id=#764 [HDPorn Network] Native Ad Unit for Internal Ads [Desktop] State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#764 [HDPorn Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fb308} main_query:{Var:main_query Labels:premium_deal_id=#764 [HDPorn Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fb360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093680111s EvaluationString:[ var='C' labels={premium_deal_id=#764 [HDPorn Network] Native Ad Unit for Internal Ads [Desktop]} value=0 ], [ var='main_query' labels={premium_deal_id=#764 [HDPorn Network] Native Ad Unit for Internal Ads [Desktop]} value=7.870370370370372 ]} {Instance:premium_deal_id=#766 [Shemale Network] Native Ad Unit for Internal Ads [Desktop] State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#766 [Shemale Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fb390} main_query:{Var:main_query Labels:premium_deal_id=#766 [Shemale Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fb398}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093685658s EvaluationString:[ var='C' labels={premium_deal_id=#766 [Shemale Network] Native Ad Unit for Internal Ads [Desktop]} value=0 ], [ var='main_query' labels={premium_deal_id=#766 [Shemale Network] Native Ad Unit for Internal Ads [Desktop]} value=4.817031089565549 ]} {Instance:premium_deal_id=#768 CJ Network - Desktop + Tablet Footer 1 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#768 CJ Network - Desktop + Tablet Footer 1 (300x250) Value:0xc0010fb3e8} main_query:{Var:main_query Labels:premium_deal_id=#768 CJ Network - Desktop + Tablet Footer 1 (300x250) Value:0xc0010fb430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093694022s EvaluationString:[ var='C' labels={premium_deal_id=#768 CJ Network - Desktop + Tablet Footer 1 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#768 CJ Network - Desktop + Tablet Footer 1 (300x250)} value=2.6339691189827485 ]} {Instance:premium_deal_id=#769 CJ Network - Desktop + Tablet Footer 2 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#769 CJ Network - Desktop + Tablet Footer 2 (300x250) Value:0xc0010fb470} main_query:{Var:main_query Labels:premium_deal_id=#769 CJ Network - Desktop + Tablet Footer 2 (300x250) Value:0xc0010fb478}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093701704s EvaluationString:[ var='C' labels={premium_deal_id=#769 CJ Network - Desktop + Tablet Footer 2 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#769 CJ 
Network - Desktop + Tablet Footer 2 (300x250)} value=3.8951197332524945 ]} {Instance:premium_deal_id=#770 CJ Network - Desktop + Tablet Footer 3 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#770 CJ Network - Desktop + Tablet Footer 3 (300x250) Value:0xc0010fb538} main_query:{Var:main_query Labels:premium_deal_id=#770 CJ Network - Desktop + Tablet Footer 3 (300x250) Value:0xc0010fb620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093708639s EvaluationString:[ var='C' labels={premium_deal_id=#770 CJ Network - Desktop + Tablet Footer 3 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#770 CJ Network - Desktop + Tablet Footer 3 (300x250)} value=3.1553398058252524 ]} {Instance:premium_deal_id=#771 HDPornVideos.tv - Desktop + Tablet Footer 1 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#771 HDPornVideos.tv - Desktop + Tablet Footer 1 Value:0xc0010fb660} main_query:{Var:main_query Labels:premium_deal_id=#771 HDPornVideos.tv - Desktop + Tablet Footer 1 Value:0xc0010fb668}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093716972s EvaluationString:[ var='C' labels={premium_deal_id=#771 HDPornVideos.tv - Desktop + Tablet Footer 1} value=0 ], [ var='main_query' labels={premium_deal_id=#771 HDPornVideos.tv - Desktop + Tablet Footer 1} value=18.669131238447314 ]} {Instance:premium_deal_id=#772 HDPornVideos.tv - Desktop + Tablet Footer 2 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#772 HDPornVideos.tv - Desktop + Tablet Footer 2 Value:0xc0010fb6f8} main_query:{Var:main_query Labels:premium_deal_id=#772 HDPornVideos.tv - Desktop + Tablet Footer 2 Value:0xc0010fb6f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093724257s EvaluationString:[ var='C' labels={premium_deal_id=#772 HDPornVideos.tv - Desktop + Tablet Footer 2} value=0 ], [ var='main_query' labels={premium_deal_id=#772 HDPornVideos.tv - Desktop + Tablet Footer 2} value=19.924098671726753 ]} {Instance:premium_deal_id=#773 HDPornVideos.tv - Desktop + Tablet Footer 3 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#773 HDPornVideos.tv - Desktop + Tablet Footer 3 Value:0xc0010fb728} main_query:{Var:main_query Labels:premium_deal_id=#773 HDPornVideos.tv - Desktop + Tablet Footer 3 Value:0xc0010fb780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.0937308s EvaluationString:[ var='C' labels={premium_deal_id=#773 HDPornVideos.tv - Desktop + Tablet Footer 3} value=0 ], [ var='main_query' labels={premium_deal_id=#773 HDPornVideos.tv - Desktop + Tablet Footer 3} value=19.999999999999996 ]} {Instance:premium_deal_id=#778 [Gay Network] Native Ad Unit for Internal Ads [Desktop] State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#778 [Gay Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fb7b0} main_query:{Var:main_query Labels:premium_deal_id=#778 [Gay Network] Native Ad Unit for Internal Ads [Desktop] Value:0xc0010fb7b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093736767s EvaluationString:[ var='C' labels={premium_deal_id=#778 [Gay Network] Native Ad Unit for Internal Ads [Desktop]} value=0 ], [ var='main_query' labels={premium_deal_id=#778 [Gay Network] Native Ad Unit for Internal Ads [Desktop]} value=8.037240548658463 ]} {Instance:premium_deal_id=#781 BigFuck Network - Desktop Footer 1 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#781 
BigFuck Network - Desktop Footer 1 (300x250) Value:0xc0010fb818} main_query:{Var:main_query Labels:premium_deal_id=#781 BigFuck Network - Desktop Footer 1 (300x250) Value:0xc0010fb870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093742752s EvaluationString:[ var='C' labels={premium_deal_id=#781 BigFuck Network - Desktop Footer 1 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#781 BigFuck Network - Desktop Footer 1 (300x250)} value=21.643127113586623 ]} {Instance:premium_deal_id=#782 BigFuck Network - Desktop Footer 2 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#782 BigFuck Network - Desktop Footer 2 (300x250) Value:0xc0010fb8a0} main_query:{Var:main_query Labels:premium_deal_id=#782 BigFuck Network - Desktop Footer 2 (300x250) Value:0xc0010fb8a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093748774s EvaluationString:[ var='C' labels={premium_deal_id=#782 BigFuck Network - Desktop Footer 2 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#782 BigFuck Network - Desktop Footer 2 (300x250)} value=26.790358938789673 ]} {Instance:premium_deal_id=#783 BigFuck Network - Desktop Footer 3 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#783 BigFuck Network - Desktop Footer 3 (300x250) Value:0xc0010fb8f8} main_query:{Var:main_query Labels:premium_deal_id=#783 BigFuck Network - Desktop Footer 3 (300x250) Value:0xc0010fb940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093754811s EvaluationString:[ var='C' labels={premium_deal_id=#783 BigFuck Network - Desktop Footer 3 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#783 BigFuck Network - Desktop Footer 3 (300x250)} value=26.355917518627624 ]} {Instance:premium_deal_id=#805 HotPornTubes Pre-Roll VAST State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#805 HotPornTubes Pre-Roll VAST Value:0xc0010fb980} main_query:{Var:main_query Labels:premium_deal_id=#805 HotPornTubes Pre-Roll VAST Value:0xc0010fb988}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093759262s EvaluationString:[ var='C' labels={premium_deal_id=#805 HotPornTubes Pre-Roll VAST} value=0 ], [ var='main_query' labels={premium_deal_id=#805 HotPornTubes Pre-Roll VAST} value=12.837045720984763 ]} {Instance:premium_deal_id=#806 HotPornTubes VideoSlider State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#806 HotPornTubes VideoSlider Value:0xc0010fb9d8} main_query:{Var:main_query Labels:premium_deal_id=#806 HotPornTubes VideoSlider Value:0xc0010fba90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093762926s EvaluationString:[ var='C' labels={premium_deal_id=#806 HotPornTubes VideoSlider} value=0 ], [ var='main_query' labels={premium_deal_id=#806 HotPornTubes VideoSlider} value=7.287325947758916 ]} {Instance:premium_deal_id=#822 Nsfw In Thumbs State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#822 Nsfw In Thumbs Value:0xc0010fbb40} main_query:{Var:main_query Labels:premium_deal_id=#822 Nsfw In Thumbs Value:0xc0010fbb48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093766663s EvaluationString:[ var='C' labels={premium_deal_id=#822 Nsfw In Thumbs} value=0 ], [ var='main_query' labels={premium_deal_id=#822 Nsfw In Thumbs} value=8.397554206173652 ]} {Instance:premium_deal_id=#824 Nsfw Video Slider State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#824 Nsfw 
Video Slider Value:0xc0010fbc68} main_query:{Var:main_query Labels:premium_deal_id=#824 Nsfw Video Slider Value:0xc0010fbd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09381116s EvaluationString:[ var='C' labels={premium_deal_id=#824 Nsfw Video Slider} value=0 ], [ var='main_query' labels={premium_deal_id=#824 Nsfw Video Slider} value=26.06179213857429 ]} {Instance:premium_deal_id=#826 General Banner 300x250 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#826 General Banner 300x250 Value:0xc0010fbda0} main_query:{Var:main_query Labels:premium_deal_id=#826 General Banner 300x250 Value:0xc0010fbda8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093818005s EvaluationString:[ var='C' labels={premium_deal_id=#826 General Banner 300x250} value=0 ], [ var='main_query' labels={premium_deal_id=#826 General Banner 300x250} value=17.20629390397308 ]} {Instance:premium_deal_id=#827 HdPornVideos.Tv - UVB desktop State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#827 HdPornVideos.Tv - UVB desktop Value:0xc0010fbf18} main_query:{Var:main_query Labels:premium_deal_id=#827 HdPornVideos.Tv - UVB desktop Value:0xc0010fbf10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093823257s EvaluationString:[ var='C' labels={premium_deal_id=#827 HdPornVideos.Tv - UVB desktop} value=0 ], [ var='main_query' labels={premium_deal_id=#827 HdPornVideos.Tv - UVB desktop} value=33.33333333333333 ]} {Instance:premium_deal_id=#829 HdPornVideos.Tv - video slider State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#829 HdPornVideos.Tv - video slider Value:0xc0010fbf48} main_query:{Var:main_query Labels:premium_deal_id=#829 HdPornVideos.Tv - video slider Value:0xc0010fbf90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093841784s EvaluationString:[ var='C' labels={premium_deal_id=#829 HdPornVideos.Tv - video slider} value=0 ], [ var='main_query' labels={premium_deal_id=#829 HdPornVideos.Tv - video slider} value=15.048543689320383 ]} {Instance:premium_deal_id=#830 HDPornVideos.tv - ITVB State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#830 HDPornVideos.tv - ITVB Value:0xc0010fbfc0} main_query:{Var:main_query Labels:premium_deal_id=#830 HDPornVideos.tv - ITVB Value:0xc0010fbfc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09384856s EvaluationString:[ var='C' labels={premium_deal_id=#830 HDPornVideos.tv - ITVB} value=0 ], [ var='main_query' labels={premium_deal_id=#830 HDPornVideos.tv - ITVB} value=-2.7027027027026973 ]} {Instance:premium_deal_id=#842 Pornogram.tv - Popunder State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#842 Pornogram.tv - Popunder Value:0xc004ef2018} main_query:{Var:main_query Labels:premium_deal_id=#842 Pornogram.tv - Popunder Value:0xc004ef2060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093856167s EvaluationString:[ var='C' labels={premium_deal_id=#842 Pornogram.tv - Popunder} value=0 ], [ var='main_query' labels={premium_deal_id=#842 Pornogram.tv - Popunder} value=0 ]} {Instance:premium_deal_id=#843 Pornogram.tv - NTVA State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#843 Pornogram.tv - NTVA Value:0xc004ef20a0} main_query:{Var:main_query Labels:premium_deal_id=#843 Pornogram.tv - NTVA Value:0xc004ef20a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093864905s EvaluationString:[ var='C' labels={premium_deal_id=#843 
Pornogram.tv - NTVA} value=0 ], [ var='main_query' labels={premium_deal_id=#843 Pornogram.tv - NTVA} value=0 ]} {Instance:premium_deal_id=#844 Pornogram.tv - NTVB State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#844 Pornogram.tv - NTVB Value:0xc004ef2108} main_query:{Var:main_query Labels:premium_deal_id=#844 Pornogram.tv - NTVB Value:0xc004ef2170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09387049s EvaluationString:[ var='C' labels={premium_deal_id=#844 Pornogram.tv - NTVB} value=0 ], [ var='main_query' labels={premium_deal_id=#844 Pornogram.tv - NTVB} value=0 ]} {Instance:premium_deal_id=#845 Pornogram.tv - Mobile Header State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#845 Pornogram.tv - Mobile Header Value:0xc004ef21b0} main_query:{Var:main_query Labels:premium_deal_id=#845 Pornogram.tv - Mobile Header Value:0xc004ef21b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09387594s EvaluationString:[ var='C' labels={premium_deal_id=#845 Pornogram.tv - Mobile Header} value=0 ], [ var='main_query' labels={premium_deal_id=#845 Pornogram.tv - Mobile Header} value=17.156862745098046 ]} {Instance:premium_deal_id=#847 Pornogram.tv - ITVA State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#847 Pornogram.tv - ITVA Value:0xc004ef2248} main_query:{Var:main_query Labels:premium_deal_id=#847 Pornogram.tv - ITVA Value:0xc004ef2240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093881198s EvaluationString:[ var='C' labels={premium_deal_id=#847 Pornogram.tv - ITVA} value=0 ], [ var='main_query' labels={premium_deal_id=#847 Pornogram.tv - ITVA} value=0 ]} {Instance:premium_deal_id=#848 Pornogram.tv - UVB Mobile State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#848 Pornogram.tv - UVB Mobile Value:0xc004ef2278} main_query:{Var:main_query Labels:premium_deal_id=#848 Pornogram.tv - UVB Mobile Value:0xc004ef22d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093886677s EvaluationString:[ var='C' labels={premium_deal_id=#848 Pornogram.tv - UVB Mobile} value=0 ], [ var='main_query' labels={premium_deal_id=#848 Pornogram.tv - UVB Mobile} value=0 ]} {Instance:premium_deal_id=#851 Pornogram.tv - NTVC State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#851 Pornogram.tv - NTVC Value:0xc004ef2348} main_query:{Var:main_query Labels:premium_deal_id=#851 Pornogram.tv - NTVC Value:0xc004ef2340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093891989s EvaluationString:[ var='C' labels={premium_deal_id=#851 Pornogram.tv - NTVC} value=0 ], [ var='main_query' labels={premium_deal_id=#851 Pornogram.tv - NTVC} value=0 ]} {Instance:premium_deal_id=#852 Pornogram.tv - Desktop Footer State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#852 Pornogram.tv - Desktop Footer Value:0xc004ef23c8} main_query:{Var:main_query Labels:premium_deal_id=#852 Pornogram.tv - Desktop Footer Value:0xc004ef23c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093897121s EvaluationString:[ var='C' labels={premium_deal_id=#852 Pornogram.tv - Desktop Footer} value=0 ], [ var='main_query' labels={premium_deal_id=#852 Pornogram.tv - Desktop Footer} value=19.60264900662252 ]} {Instance:premium_deal_id=#853 Pornogram.tv ITVB State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#853 Pornogram.tv ITVB Value:0xc004ef23f8} main_query:{Var:main_query 
Labels:premium_deal_id=#853 Pornogram.tv ITVB Value:0xc004ef2450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093902835s EvaluationString:[ var='C' labels={premium_deal_id=#853 Pornogram.tv ITVB} value=0 ], [ var='main_query' labels={premium_deal_id=#853 Pornogram.tv ITVB} value=0 ]} {Instance:premium_deal_id=#854 Pornogram.tv - UVB Desktop State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#854 Pornogram.tv - UVB Desktop Value:0xc004ef2480} main_query:{Var:main_query Labels:premium_deal_id=#854 Pornogram.tv - UVB Desktop Value:0xc004ef2488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093907532s EvaluationString:[ var='C' labels={premium_deal_id=#854 Pornogram.tv - UVB Desktop} value=0 ], [ var='main_query' labels={premium_deal_id=#854 Pornogram.tv - UVB Desktop} value=0 ]} {Instance:premium_deal_id=#857 Pornogram.tv - Interstitial State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#857 Pornogram.tv - Interstitial Value:0xc004ef24e8} main_query:{Var:main_query Labels:premium_deal_id=#857 Pornogram.tv - Interstitial Value:0xc004ef2540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093913158s EvaluationString:[ var='C' labels={premium_deal_id=#857 Pornogram.tv - Interstitial} value=0 ], [ var='main_query' labels={premium_deal_id=#857 Pornogram.tv - Interstitial} value=0 ]} {Instance:premium_deal_id=#860 Pornogram.tv - Desktop Footer 2 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#860 Pornogram.tv - Desktop Footer 2 Value:0xc004ef2590} main_query:{Var:main_query Labels:premium_deal_id=#860 Pornogram.tv - Desktop Footer 2 Value:0xc004ef2598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093918612s EvaluationString:[ var='C' labels={premium_deal_id=#860 Pornogram.tv - Desktop Footer 2} value=0 ], [ var='main_query' labels={premium_deal_id=#860 Pornogram.tv - Desktop Footer 2} value=18.059299191374656 ]} {Instance:premium_deal_id=#861 Pornogram.tv - Desktop Footer 3 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#861 Pornogram.tv - Desktop Footer 3 Value:0xc004ef2608} main_query:{Var:main_query Labels:premium_deal_id=#861 Pornogram.tv - Desktop Footer 3 Value:0xc004ef2660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093925155s EvaluationString:[ var='C' labels={premium_deal_id=#861 Pornogram.tv - Desktop Footer 3} value=0 ], [ var='main_query' labels={premium_deal_id=#861 Pornogram.tv - Desktop Footer 3} value=22.252747252747263 ]} {Instance:premium_deal_id=#906 Gay_Desktop Footer (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#906 Gay_Desktop Footer (300x250) Value:0xc004ef26a0} main_query:{Var:main_query Labels:premium_deal_id=#906 Gay_Desktop Footer (300x250) Value:0xc004ef26a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093931191s EvaluationString:[ var='C' labels={premium_deal_id=#906 Gay_Desktop Footer (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#906 Gay_Desktop Footer (300x250)} value=-4.952092788703988 ]} {Instance:premium_deal_id=#907 Gay_Mobile Footer (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#907 Gay_Mobile Footer (300x250) Value:0xc004ef26f8} main_query:{Var:main_query Labels:premium_deal_id=#907 Gay_Mobile Footer (300x250) Value:0xc004ef2750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093937594s EvaluationString:[ var='C' 
labels={premium_deal_id=#907 Gay_Mobile Footer (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#907 Gay_Mobile Footer (300x250)} value=-11.367133885122493 ]} {Instance:premium_deal_id=#908 Gay_Mobile Header (300x100) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#908 Gay_Mobile Header (300x100) Value:0xc004ef2790} main_query:{Var:main_query Labels:premium_deal_id=#908 Gay_Mobile Header (300x100) Value:0xc004ef2798}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09394365s EvaluationString:[ var='C' labels={premium_deal_id=#908 Gay_Mobile Header (300x100)} value=0 ], [ var='main_query' labels={premium_deal_id=#908 Gay_Mobile Header (300x100)} value=-10.707803992740473 ]} {Instance:premium_deal_id=#911 Gay_Desktop In Thumbs (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#911 Gay_Desktop In Thumbs (300x250) Value:0xc004ef2808} main_query:{Var:main_query Labels:premium_deal_id=#911 Gay_Desktop In Thumbs (300x250) Value:0xc004ef2880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093949555s EvaluationString:[ var='C' labels={premium_deal_id=#911 Gay_Desktop In Thumbs (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#911 Gay_Desktop In Thumbs (300x250)} value=-3.5749313456257314 ]} {Instance:premium_deal_id=#912 Gay_Mobile In Thumbs (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#912 Gay_Mobile In Thumbs (300x250) Value:0xc004ef28c0} main_query:{Var:main_query Labels:premium_deal_id=#912 Gay_Mobile In Thumbs (300x250) Value:0xc004ef28c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09395535s EvaluationString:[ var='C' labels={premium_deal_id=#912 Gay_Mobile In Thumbs (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#912 Gay_Mobile In Thumbs (300x250)} value=-10.876712328767123 ]} {Instance:premium_deal_id=#913 Gay_NTVA (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#913 Gay_NTVA (300x250) Value:0xc004ef2918} main_query:{Var:main_query Labels:premium_deal_id=#913 Gay_NTVA (300x250) Value:0xc004ef2980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093961703s EvaluationString:[ var='C' labels={premium_deal_id=#913 Gay_NTVA (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#913 Gay_NTVA (300x250)} value=-13.813517513566854 ]} {Instance:premium_deal_id=#915 Gay_NTVB (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#915 Gay_NTVB (300x250) Value:0xc004ef29b0} main_query:{Var:main_query Labels:premium_deal_id=#915 Gay_NTVB (300x250) Value:0xc004ef29b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093967319s EvaluationString:[ var='C' labels={premium_deal_id=#915 Gay_NTVB (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#915 Gay_NTVB (300x250)} value=-14.395886889460153 ]} {Instance:premium_deal_id=#916 Gay_NTVC Mobile (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#916 Gay_NTVC Mobile (300x250) Value:0xc004ef2a18} main_query:{Var:main_query Labels:premium_deal_id=#916 Gay_NTVC Mobile (300x250) Value:0xc004ef2a70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093975065s EvaluationString:[ var='C' labels={premium_deal_id=#916 Gay_NTVC Mobile (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#916 Gay_NTVC Mobile (300x250)} value=-13.360644905510766 ]} {Instance:premium_deal_id=#990 
Bull - NTVA State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#990 Bull - NTVA Value:0xc004ef2ad0} main_query:{Var:main_query Labels:premium_deal_id=#990 Bull - NTVA Value:0xc004ef2ad8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09398037s EvaluationString:[ var='C' labels={premium_deal_id=#990 Bull - NTVA} value=0 ], [ var='main_query' labels={premium_deal_id=#990 Bull - NTVA} value=0 ]} {Instance:premium_deal_id=#991 Bull - NTVB State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#991 Bull - NTVB Value:0xc004ef2b68} main_query:{Var:main_query Labels:premium_deal_id=#991 Bull - NTVB Value:0xc004ef2be0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093987257s EvaluationString:[ var='C' labels={premium_deal_id=#991 Bull - NTVB} value=0 ], [ var='main_query' labels={premium_deal_id=#991 Bull - NTVB} value=0 ]} {Instance:premium_deal_id=#992 Bull - NTVC State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#992 Bull - NTVC Value:0xc004ef2c68} main_query:{Var:main_query Labels:premium_deal_id=#992 Bull - NTVC Value:0xc004ef2c60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093993203s EvaluationString:[ var='C' labels={premium_deal_id=#992 Bull - NTVC} value=0 ], [ var='main_query' labels={premium_deal_id=#992 Bull - NTVC} value=0 ]} {Instance:premium_deal_id=#993 Bull - ITVA State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#993 Bull - ITVA Value:0xc004ef2cc8} main_query:{Var:main_query Labels:premium_deal_id=#993 Bull - ITVA Value:0xc004ef2d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.093998609s EvaluationString:[ var='C' labels={premium_deal_id=#993 Bull - ITVA} value=0 ], [ var='main_query' labels={premium_deal_id=#993 Bull - ITVA} value=0 ]} {Instance:premium_deal_id=#994 Bull - ITVB State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#994 Bull - ITVB Value:0xc004ef2db0} main_query:{Var:main_query Labels:premium_deal_id=#994 Bull - ITVB Value:0xc004ef2db8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094004127s EvaluationString:[ var='C' labels={premium_deal_id=#994 Bull - ITVB} value=0 ], [ var='main_query' labels={premium_deal_id=#994 Bull - ITVB} value=0 ]} {Instance:premium_deal_id=#995 Bull - FOOTER 1 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#995 Bull - FOOTER 1 Value:0xc004ef2e38} main_query:{Var:main_query Labels:premium_deal_id=#995 Bull - FOOTER 1 Value:0xc004ef2e90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094009239s EvaluationString:[ var='C' labels={premium_deal_id=#995 Bull - FOOTER 1} value=0 ], [ var='main_query' labels={premium_deal_id=#995 Bull - FOOTER 1} value=0 ]} {Instance:premium_deal_id=#998 Bull - Mobile header State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#998 Bull - Mobile header Value:0xc004ef2ec0} main_query:{Var:main_query Labels:premium_deal_id=#998 Bull - Mobile header Value:0xc004ef2ec8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094015816s EvaluationString:[ var='C' labels={premium_deal_id=#998 Bull - Mobile header} value=0 ], [ var='main_query' labels={premium_deal_id=#998 Bull - Mobile header} value=0 ]} {Instance:premium_deal_id=#1000 Bull - UVB Mobile State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1000 Bull - UVB Mobile Value:0xc004ef2f18} main_query:{Var:main_query 
Labels:premium_deal_id=#1000 Bull - UVB Mobile Value:0xc004ef2f70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094021566s EvaluationString:[ var='C' labels={premium_deal_id=#1000 Bull - UVB Mobile} value=0 ], [ var='main_query' labels={premium_deal_id=#1000 Bull - UVB Mobile} value=0 ]} {Instance:premium_deal_id=#1007 Bull - VideoSlider State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1007 Bull - VideoSlider Value:0xc004ef2fb0} main_query:{Var:main_query Labels:premium_deal_id=#1007 Bull - VideoSlider Value:0xc004ef2fb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09402696s EvaluationString:[ var='C' labels={premium_deal_id=#1007 Bull - VideoSlider} value=0 ], [ var='main_query' labels={premium_deal_id=#1007 Bull - VideoSlider} value=0 ]} {Instance:premium_deal_id=#1012 videos_v1.2_china_popunder State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1012 videos_v1.2_china_popunder Value:0xc004ef3018} main_query:{Var:main_query Labels:premium_deal_id=#1012 videos_v1.2_china_popunder Value:0xc004ef3080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094033552s EvaluationString:[ var='C' labels={premium_deal_id=#1012 videos_v1.2_china_popunder} value=0 ], [ var='main_query' labels={premium_deal_id=#1012 videos_v1.2_china_popunder} value=44.01710221063328 ]} {Instance:premium_deal_id=#1017 GayPorn.Video - NTVA (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1017 GayPorn.Video - NTVA (300x250) Value:0xc004ef30c0} main_query:{Var:main_query Labels:premium_deal_id=#1017 GayPorn.Video - NTVA (300x250) Value:0xc004ef30c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094040522s EvaluationString:[ var='C' labels={premium_deal_id=#1017 GayPorn.Video - NTVA (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#1017 GayPorn.Video - NTVA (300x250)} value=0 ]} {Instance:premium_deal_id=#1018 GayPorn.Video - NTVB (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1018 GayPorn.Video - NTVB (300x250) Value:0xc004ef3148} main_query:{Var:main_query Labels:premium_deal_id=#1018 GayPorn.Video - NTVB (300x250) Value:0xc004ef3140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094048111s EvaluationString:[ var='C' labels={premium_deal_id=#1018 GayPorn.Video - NTVB (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#1018 GayPorn.Video - NTVB (300x250)} value=0 ]} {Instance:premium_deal_id=#1019 GayPorn.Video - NTVC (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1019 GayPorn.Video - NTVC (300x250) Value:0xc004ef31c8} main_query:{Var:main_query Labels:premium_deal_id=#1019 GayPorn.Video - NTVC (300x250) Value:0xc004ef31c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094053599s EvaluationString:[ var='C' labels={premium_deal_id=#1019 GayPorn.Video - NTVC (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#1019 GayPorn.Video - NTVC (300x250)} value=0 ]} {Instance:premium_deal_id=#1020 GayPorn.Video - Interstitial State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1020 GayPorn.Video - Interstitial Value:0xc004ef3248} main_query:{Var:main_query Labels:premium_deal_id=#1020 GayPorn.Video - Interstitial Value:0xc004ef3240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094059393s EvaluationString:[ var='C' labels={premium_deal_id=#1020 GayPorn.Video - 
Interstitial} value=0 ], [ var='main_query' labels={premium_deal_id=#1020 GayPorn.Video - Interstitial} value=0 ]} {Instance:premium_deal_id=#1021 GayPorn.Video - Mobile Header (300x100) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1021 GayPorn.Video - Mobile Header (300x100) Value:0xc004ef3278} main_query:{Var:main_query Labels:premium_deal_id=#1021 GayPorn.Video - Mobile Header (300x100) Value:0xc004ef32f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094065695s EvaluationString:[ var='C' labels={premium_deal_id=#1021 GayPorn.Video - Mobile Header (300x100)} value=0 ], [ var='main_query' labels={premium_deal_id=#1021 GayPorn.Video - Mobile Header (300x100)} value=34.466019417475735 ]} {Instance:premium_deal_id=#1022 GayPorn.Video - Mobile and Desktop Footer #1 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1022 GayPorn.Video - Mobile and Desktop Footer #1 (300x250) Value:0xc004ef3320} main_query:{Var:main_query Labels:premium_deal_id=#1022 GayPorn.Video - Mobile and Desktop Footer #1 (300x250) Value:0xc004ef3328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094072386s EvaluationString:[ var='C' labels={premium_deal_id=#1022 GayPorn.Video - Mobile and Desktop Footer #1 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#1022 GayPorn.Video - Mobile and Desktop Footer #1 (300x250)} value=28.037383177570096 ]} {Instance:premium_deal_id=#1023 GayPorn.Video - Desktop Footer #2 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1023 GayPorn.Video - Desktop Footer #2 (300x250) Value:0xc004ef33a8} main_query:{Var:main_query Labels:premium_deal_id=#1023 GayPorn.Video - Desktop Footer #2 (300x250) Value:0xc004ef33a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094097786s EvaluationString:[ var='C' labels={premium_deal_id=#1023 GayPorn.Video - Desktop Footer #2 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#1023 GayPorn.Video - Desktop Footer #2 (300x250)} value=0 ]} {Instance:premium_deal_id=#1024 GayPorn.Video - Desktop Footer #3 (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1024 GayPorn.Video - Desktop Footer #3 (300x250) Value:0xc004ef33f8} main_query:{Var:main_query Labels:premium_deal_id=#1024 GayPorn.Video - Desktop Footer #3 (300x250) Value:0xc004ef3440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094103744s EvaluationString:[ var='C' labels={premium_deal_id=#1024 GayPorn.Video - Desktop Footer #3 (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#1024 GayPorn.Video - Desktop Footer #3 (300x250)} value=0 ]} {Instance:premium_deal_id=#1026 GayPorn.Video - ITVA (300x250) State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1026 GayPorn.Video - ITVA (300x250) Value:0xc004ef3490} main_query:{Var:main_query Labels:premium_deal_id=#1026 GayPorn.Video - ITVA (300x250) Value:0xc004ef3498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094109654s EvaluationString:[ var='C' labels={premium_deal_id=#1026 GayPorn.Video - ITVA (300x250)} value=0 ], [ var='main_query' labels={premium_deal_id=#1026 GayPorn.Video - ITVA (300x250)} value=0 ]} {Instance:premium_deal_id=#1029 GayPorn.Video - Pre-Roll State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1029 GayPorn.Video - Pre-Roll Value:0xc004ef3508} main_query:{Var:main_query Labels:premium_deal_id=#1029 GayPorn.Video 
- Pre-Roll Value:0xc004ef3550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094116186s EvaluationString:[ var='C' labels={premium_deal_id=#1029 GayPorn.Video - Pre-Roll} value=0 ], [ var='main_query' labels={premium_deal_id=#1029 GayPorn.Video - Pre-Roll} value=0 ]} {Instance:premium_deal_id=#1030 GayPorn.Video - Popunder State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1030 GayPorn.Video - Popunder Value:0xc004ef3580} main_query:{Var:main_query Labels:premium_deal_id=#1030 GayPorn.Video - Popunder Value:0xc004ef3588}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094123013s EvaluationString:[ var='C' labels={premium_deal_id=#1030 GayPorn.Video - Popunder} value=0 ], [ var='main_query' labels={premium_deal_id=#1030 GayPorn.Video - Popunder} value=0 ]} {Instance:premium_deal_id=#1078 Sos.xxx Banner 300x250 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1078 Sos.xxx Banner 300x250 Value:0xc004ef35d8} main_query:{Var:main_query Labels:premium_deal_id=#1078 Sos.xxx Banner 300x250 Value:0xc004ef3640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094128854s EvaluationString:[ var='C' labels={premium_deal_id=#1078 Sos.xxx Banner 300x250} value=0 ], [ var='main_query' labels={premium_deal_id=#1078 Sos.xxx Banner 300x250} value=10.049068270130746 ]} {Instance:premium_deal_id=#1079 Sos.xxx Popunder State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1079 Sos.xxx Popunder Value:0xc004ef3680} main_query:{Var:main_query Labels:premium_deal_id=#1079 Sos.xxx Popunder Value:0xc004ef3688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094135137s EvaluationString:[ var='C' labels={premium_deal_id=#1079 Sos.xxx Popunder} value=0 ], [ var='main_query' labels={premium_deal_id=#1079 Sos.xxx Popunder} value=0 ]} {Instance:premium_deal_id=#1080 Sos.xxx Mobile Banner 300x100 State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1080 Sos.xxx Mobile Banner 300x100 Value:0xc004ef36e8} main_query:{Var:main_query Labels:premium_deal_id=#1080 Sos.xxx Mobile Banner 300x100 Value:0xc004ef3740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.09414287s EvaluationString:[ var='C' labels={premium_deal_id=#1080 Sos.xxx Mobile Banner 300x100} value=0 ], [ var='main_query' labels={premium_deal_id=#1080 Sos.xxx Mobile Banner 300x100} value=16.87402799377915 ]} {Instance:premium_deal_id=#1081 Sos.xxx - Mobile Popunder State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:premium_deal_id=#1081 Sos.xxx - Mobile Popunder Value:0xc004ef37a8} main_query:{Var:main_query Labels:premium_deal_id=#1081 Sos.xxx - Mobile Popunder Value:0xc004ef37a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.094149092s EvaluationString:[ var='C' labels={premium_deal_id=#1081 Sos.xxx - Mobile Popunder} value=0 ], [ var='main_query' labels={premium_deal_id=#1081 Sos.xxx - Mobile Popunder} value=20.903954802259882 ]}]" duration=9.063381ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=DataPacket, environment=production, instance=212.102.59.191:9998, ip=212.102.59.191, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.099455978Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:15.099397411Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=DataPacket, environment=production, instance=212.102.59.191:9998, ip=212.102.59.191, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.099279465Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.099240525Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.099205113Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.09908876Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.099101497Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.09908944Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.098990484Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=DataPacket, environment=production, instance=212.102.59.160:9998, ip=212.102.59.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-chicago.crt, role=vpn, server=chicago404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.099013998Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.09894882Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.098942851Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:15.098848629Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=DataPacket, environment=production, instance=212.102.59.160:9998, ip=212.102.59.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.098736378Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=116479 slug=tomtomnv t=2024-05-29T13:44:15.098462804Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=info ts=2024-05-29T13:44:15.098409331Z caller=remote_image_capturer.go:61 user=116479 slug=tomtomnv rule_org_id=1 rule_uid=zmhcI4Lnz dashboard=Men4MVEGz panel=6 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=116479 slug=tomtomnv instance= t=2024-05-29T13:44:15.098440526Z level=warn msg="Failed to take an image" dashboard=Men4MVEGz panel=6 error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager.persist user=106794 slug=tsikah t=2024-05-29T13:44:15.09832358Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Chicago, country=United States, datacenter=DataPacket, environment=production, instance=212.102.59.129:9998, ip=212.102.59.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=chicago403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.098272929Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:15.098094345Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.098140675Z caller=remote_instance_store.go:51 user=453497 slug=n444151595 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:15.098128872Z caller=remote_image_capturer.go:54 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="rendering alert image with grafana"
+level=error ts=2024-05-29T13:44:15.09800319Z caller=remote_rule_evaluator.go:110 user=453497 slug=n444151595 msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+logger=ngalert.scheduler user=453497 slug=n444151595 version=12 fingerprint=d520d136867b38cb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.098028108Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.681353ms
+logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.09802414Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium,
brand=PIA, city=Cheyenne, country=United States, datacenter=DataPacket, environment=production, instance=84.239.50.129:9998, ip=84.239.50.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-wyoming-pf.crt, role=vpn, server=wyoming402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.098033022Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.097836712Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.097814803Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.097635307Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.097636479Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription" t=2024-05-29T13:44:15.097567541Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.097553362Z caller=remote_image_capturer.go:54 user=106794 slug=tsikah rule_org_id=1 rule_uid=-TJCGoO7z dashboard=HRLG4OPZz panel=30 msg="rendering alert image with grafana" +logger=ngalert.state.manager user=116479 slug=tomtomnv instance= t=2024-05-29T13:44:15.097570956Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.scheduler user=327842 slug=exabeam version=32 fingerprint=5d734992f4712a4f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.097421426Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription State:Normal Error: Results:map[] Values:map[E:{Var:E Labels:resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription Value:0xc01c269520} num_undelivered_messages:{Var:num_undelivered_messages Labels:resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription Value:0xc01c269528} oldest_unacked_message_age:{Var:oldest_unacked_message_age Labels:resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription Value:0xc01c269570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.097088752s EvaluationString:[ var='E' labels={resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription} value=0 ], [ var='num_undelivered_messages' labels={resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription} value=0.1111111111111111 ], [ var='oldest_unacked_message_age' labels={resource.label.project_id=exa-cloud-prod, resource.label.subscription_id=sg-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription} value=0.1111111111111111 ]}]" 
duration=141.502983ms +logger=ngalert.state.manager user=106794 slug=tsikah instance= t=2024-05-29T13:44:15.0974466Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.097427156Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.097303239Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.097210391Z caller=remote_image_capturer.go:54 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="rendering alert image with grafana" +logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.097167803Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.09713273Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.097171131Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.09711741Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=1hOGRG7Vk, ref_id=A" t=2024-05-29T13:44:15.097156115Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=1hOGRG7Vk, ref_id=A" t=2024-05-29T13:44:15.097149518Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=bd4dd1419baaf173 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.09708285Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096766431s EvaluationString:}]" duration=288.659493ms +logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=1hOGRG7Vk, ref_id=A" t=2024-05-29T13:44:15.097083999Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=info ts=2024-05-29T13:44:15.097039909Z caller=remote_image_capturer.go:61 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.scheduler user=430961 slug=solifi version=2 fingerprint=39e0c0004ecbb877 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.096976681Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=1hOGRG7Vk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.096707475s EvaluationString:}]" duration=35.784914ms 
+logger=ngalert.state.manager user=68499 slug=identt instance="datasource_uid=LG8GjSxMk, ref_id=A,B" t=2024-05-29T13:44:15.096978409Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.096947496Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Caracas, country=Venezuela, datacenter=M247, environment=production, instance=95.181.237.38:9998, ip=95.181.237.38, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/venezuela.crt, role=vpn, server=venezuela406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.096801484Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:15.096727563Z caller=remote_instance_store.go:51 user=695339 slug=twofiftysix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=695339 slug=twofiftysix instance= t=2024-05-29T13:44:15.096677601Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=695339 slug=twofiftysix t=2024-05-29T13:44:15.096644106Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.096583098Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.096363671Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:15.096381804Z caller=remote_image_capturer.go:33 user=233863 slug=rtsystems rule_org_id=1 rule_uid=5okchS67k msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.096374212Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" +logger=ngalert.state.manager user=233863 slug=rtsystems t=2024-05-29T13:44:15.09632077Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.09625104Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData +logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.09619477Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" +level=debug ts=2024-05-29T13:44:15.096150379Z caller=remote_instance_store.go:51 user=871095 slug=cmcnginp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Caracas, country=Venezuela, datacenter=M247, environment=production, instance=95.181.237.26:9998, ip=95.181.237.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/venezuela.crt, role=vpn, server=venezuela405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.096021176Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Caracas, country=Venezuela, datacenter=M247, environment=production, instance=95.181.237.26:9998, ip=95.181.237.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/venezuela.crt, role=vpn, server=venezuela405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.09600612Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.095927016Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.095813776Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.961ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.095779761Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.095776131Z caller=remote_instance_store.go:51 user=648636 slug=xrplayer765 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=607648 slug=kalandharfasil019 instance= t=2024-05-29T13:44:15.095718075Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=648636 slug=xrplayer765 instance="__name__=up, agent_hostname=pi-hole, instance=Pi-Hole, job=integrations/node_exporter" t=2024-05-29T13:44:15.095719356Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=607648 slug=kalandharfasil019 instance= t=2024-05-29T13:44:15.095708367Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=607648 slug=kalandharfasil019 t=2024-05-29T13:44:15.095661991Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=648636 slug=xrplayer765 t=2024-05-29T13:44:15.095638686Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Caracas, country=Venezuela, datacenter=M247, environment=production, instance=95.181.237.14:9998, ip=95.181.237.14, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/venezuela.crt, role=vpn, server=venezuela404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.095648069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn 
t=2024-05-29T13:44:15.095598871Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:15.095608305Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.09551307Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.095386759Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612695 slug=ocipprod instance="__name__=vault_backup_age, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node" t=2024-05-29T13:44:15.095068005Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612695 slug=ocipprod instance="__name__=vault_backup_age, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node" t=2024-05-29T13:44:15.095032006Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Cairo, country=Egypt, datacenter=M247, environment=production, instance=188.214.122.98:9998, ip=188.214.122.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=cairo401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.09523268Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612695 slug=ocipprod t=2024-05-29T13:44:15.094937449Z level=debug msg="State manager processing evaluation results" resultCount=3 + level=debug ts=2024-05-29T13:44:15.095170087Z caller=remote_image_capturer.go:54 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="rendering alert image with grafana" + level=debug ts=2024-05-29T13:44:15.095025537Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.095047666Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:15.094997565Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Cairo, country=Egypt, datacenter=M247, environment=production, instance=188.214.122.98:9998, ip=188.214.122.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/egypt.crt, role=vpn, server=cairo401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.095022603Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.095022235Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.09495682Z level=warn 
msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=info ts=2024-05-29T13:44:15.094883284Z caller=remote_image_capturer.go:61 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Cairo, country=Egypt, datacenter=M247, environment=production, instance=188.214.122.114:9998, ip=188.214.122.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=cairo402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.094814969Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.094744653Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.094569161Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Cairo, country=Egypt, datacenter=M247, environment=production, instance=188.214.122.114:9998, ip=188.214.122.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/egypt.crt, role=vpn, server=cairo402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.094637546Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.094613834Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472504 slug=addmi instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.094562099Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.094502187Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.094123961Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.0942771Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.09417875Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=146.70.38.88:9998, ip=146.70.38.88, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=buenosaires410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.094162537Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.094057815Z caller=remote_instance_store.go:51 user=907609 slug=calerts msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.094142135Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.094136155Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=146.70.38.88:9998, ip=146.70.38.88, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ar.crt, role=vpn, server=buenosaires410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.094025786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=351190 slug=stagebryxx t=2024-05-29T13:44:15.093830808Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.093875545Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=916854 slug=lalamuru instance= t=2024-05-29T13:44:15.093789451Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=146.70.38.75:9998, ip=146.70.38.75, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ar.crt, role=vpn, server=buenosaires409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.093782928Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.093772181Z caller=remote_instance_store.go:51 user=890329 slug=nearone msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.093729558Z caller=remote_instance_store.go:51 user=886980 slug=althq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=890329 slug=nearone instance= t=2024-05-29T13:44:15.09372167Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=890329 slug=nearone instance= t=2024-05-29T13:44:15.093712799Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=890329 slug=nearone t=2024-05-29T13:44:15.09368407Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=886980 slug=althq instance="datasource_uid=grafanacloud-prom, ref_id=p95 Response Time" previous_handler=resultNoData t=2024-05-29T13:44:15.093516547Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=886980 slug=althq instance="datasource_uid=grafanacloud-prom, ref_id=p95 Response Time" previous_handler=resultNoData t=2024-05-29T13:44:15.093505983Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=886980 slug=althq instance="datasource_uid=grafanacloud-prom, ref_id=p95 Response Time" t=2024-05-29T13:44:15.093492618Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.093456605Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=146.70.38.42:9998, ip=146.70.38.42, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ar.crt, role=vpn, server=buenosaires408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.093456828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:15.093467997Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=149323 slug=theatreonline instance= t=2024-05-29T13:44:15.093301135Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:15.093251631Z caller=remote_rule_evaluator.go:193 user=823141 slug=yomafleetobservability msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=149323 slug=theatreonline t=2024-05-29T13:44:15.09327313Z level=debug msg="State manager 
processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=146.70.38.22:9998, ip=146.70.38.22, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=buenosaires407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.093227205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.093256914Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.09327523Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.093245113Z level=debug msg="Setting next state" handler=resultNoData + level=info ts=2024-05-29T13:44:15.093187604Z caller=remote_image_capturer.go:61 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:15.093069279Z caller=remote_instance_store.go:51 user=309195 slug=satelnetspa msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=309195 slug=satelnetspa t=2024-05-29T13:44:15.093007787Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.092916108Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.092931047Z caller=remote_image_capturer.go:61 user=309195 slug=satelnetspa rule_org_id=1 rule_uid=LqQpC2Enk dashboard=wVWZgtP7z panel=35 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.092937878Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=146.70.38.101:9998, ip=146.70.38.101, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=buenosaires411, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.092738001Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.092651206Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.092601856Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=146.70.38.101:9998, ip=146.70.38.101, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ar.crt, role=vpn, server=buenosaires411, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.092539967Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=934316 slug=saadzafar005 instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:15.092408689Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:15.092445286Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=934316 slug=saadzafar005 instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:15.092399809Z level=debug msg="Execution keep last state is Alerting" handler=resultAlerting + logger=ngalert.state.manager user=934316 slug=saadzafar005 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.092387739Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.092328598Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.092319219Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.092372294Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.092340583Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=836694 slug=viiviilxx t=2024-05-29T13:44:15.092327654Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.883444ms + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.092328734Z level=warn msg="Failed to take an image" dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 error="rpc error: code = Code(422) desc = 
screenshots unavailable" + logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:15.092155607Z level=debug msg="Deleting alert states" count=1 + level=debug ts=2024-05-29T13:44:15.092257587Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=516446 slug=awarehqdev t=2024-05-29T13:44:15.092143606Z level=info msg="Detected stale state entry" cacheID="[[\"EndpointName\",\"v3-themes-esp\"],[\"Series\",\"query8c2bc64a6f2f4f129a5556fcc41469dc\"],[\"__alert_rule_namespace_uid__\",\"D-8RyMx4z\"],[\"__alert_rule_uid__\",\"k_7YIBxVz\"],[\"alertname\",\"v3-themes-esp-5xx-errors\"],[\"grafana_folder\",\"bi\"],[\"group\",\"SageMaker5XXErrors\"],[\"route\",\"team=bi\"],[\"team\",\"bi\"]]" state=Normal reason= + level=debug ts=2024-05-29T13:44:15.092231906Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.092151154Z caller=remote_instance_store.go:51 user=797387 slug=roadrunnerdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Budapest, country=Hungary, datacenter=M247, environment=production, instance=185.94.190.194:9998, ip=185.94.190.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/hungary.crt, role=vpn, server=budapest405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.092152671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=516446 slug=awarehqdev t=2024-05-29T13:44:15.092059501Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:15.091975126Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:15.091914439Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Budapest, country=Hungary, datacenter=M247, environment=production, instance=185.189.114.98:9998, ip=185.189.114.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=budapest402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.091893313Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.09170503Z caller=remote_image_capturer.go:54 user=309195 slug=satelnetspa rule_org_id=1 rule_uid=LqQpC2Enk dashboard=wVWZgtP7z panel=35 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=430959 slug=econoinfo instance= t=2024-05-29T13:44:15.091775615Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430959 slug=econoinfo t=2024-05-29T13:44:15.091752145Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Budapest, country=Hungary, datacenter=M247, environment=production, instance=185.189.114.98:9998, ip=185.189.114.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/hungary.crt, role=vpn, server=budapest402, server_type=1G, 
service_name=cpz_vpn" t=2024-05-29T13:44:15.091720958Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.091651186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.091634839Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.091666038Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.091618916Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.091555838Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.091563174Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.091553885Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.091528264Z level=warn msg="Failed to take an image" dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=info ts=2024-05-29T13:44:15.091497084Z caller=remote_image_capturer.go:61 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Budapest, country=Hungary, datacenter=M247, environment=production, instance=185.189.114.102:9998, ip=185.189.114.102, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=budapest401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.091494472Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.09142564Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.091413877Z caller=remote_instance_store.go:51 user=941160 slug=uateu msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.091325333Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adbhspyq1s7wif alerts=1 + level=info ts=2024-05-29T13:44:15.091315335Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.244.39:10000 msg="sending alerts to grafana" rule_org_id=1 
rule_uid=adbhspyq1s7wif alerts=1 + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:15.091330972Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=53.108963ms + level=debug ts=2024-05-29T13:44:15.091336179Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.091223117Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=49.820568ms + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.09116873Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=48.822174ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Bucharest, country=Romania, datacenter=DataPacket, environment=production, instance=143.244.54.3:9998, ip=143.244.54.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=romania402, server_type=10G - Dual CPU, service_name=cpz_vpn" t=2024-05-29T13:44:15.09103949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=764616 slug=jarif81231 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.090942456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=139570 slug=pentesttools t=2024-05-29T13:44:15.090935118Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.485876ms + level=debug ts=2024-05-29T13:44:15.090870347Z caller=remote_image_capturer.go:54 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.090784186Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.090778596Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Bucharest, country=Romania, datacenter=DataPacket, environment=production, instance=143.244.54.119:9998, ip=143.244.54.119, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=romania407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.09063731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.090601205Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=386383 slug=nicosjus t=2024-05-29T13:44:15.090581446Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.090609884Z caller=remote_instance_store.go:51 user=386383 slug=nicosjus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=386383 slug=nicosjus t=2024-05-29T13:44:15.090505399Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Bucharest, country=Romania, datacenter=DataPacket, environment=production, instance=143.244.54.119:9998, ip=143.244.54.119, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/ro.crt, role=vpn, server=romania407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.090448875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=784043 slug=stpzdevops t=2024-05-29T13:44:15.090126958Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.689531ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.09022878Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.090063246Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.089948176Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.089940426Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=698122 slug=michaelkors instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.089974337Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.090031915Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:15.089904575Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.089860068Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.089890895Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.089889121Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=702631 slug=hrtconditionmonitoring instance="datasource_uid=ccc229bc-81f1-4a01-a988-904de43e9451, ref_id=A" t=2024-05-29T13:44:15.08975831Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, 
access_group=premium, brand=PIA, city=Bucharest, country=Romania, datacenter=DataPacket, environment=production, instance=143.244.52.1:9998, ip=143.244.52.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=romania406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.089874256Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:15.089863375Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.089792103Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=679029 slug=joveoprodaws version=15232 fingerprint=349be5098c7dca63 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.089816644Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.089424362s EvaluationString:}]" duration=67.235486ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Bucharest, country=Romania, datacenter=DataPacket, environment=production, instance=143.244.52.1:9998, ip=143.244.52.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=romania406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.089863528Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.089834278Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
[... several hundred similar per-tenant ngalert debug/warn log lines: alert rule evaluations ("Alert rule evaluated"), state transitions ("Setting next state", "Keeping state", "Execution no data state is Normal/Alerting"), alert instance saves ("calling SaveAlertInstance", "Saving alert states"), reserved-label warnings, alert image capture attempts ("rendering alert image with grafana", "Failed to take an image", "skipping screenshot for tenant"), and remote evaluation failures ("remote evaluate failed", "Failed to evaluate rule") ...]
+ level=debug ts=2024-05-29T13:44:15.079253118Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.079252102Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.079278051Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.07924086Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager.persist user=755996 slug=psychobunnyprd t=2024-05-29T13:44:15.079044722Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.867572ms + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1fcngiwcadb21001.cngi.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/opt" t=2024-05-29T13:44:15.079162013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1fcngiwcadb21001.cngi.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/export" t=2024-05-29T13:44:15.079109062Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1fcngiwcadb21001.cngi.gcp.hclsw.internal, job=PREPROD-AUTH-DB-Host, mountpoint=/db2inst" t=2024-05-29T13:44:15.079084111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1ccngiwcldb21001.cngi.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/opt" t=2024-05-29T13:44:15.07904658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1ccngiwcldb21001.cngi.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/home" t=2024-05-29T13:44:15.07902687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1ccngiwcldb21001.cngi.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/export" t=2024-05-29T13:44:15.078999438Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=suuscn1ccngiwcldb21001.cngi.gcp.hclsw.internal, job=PREPROD-LIVE-DB-Host, mountpoint=/db2inst" t=2024-05-29T13:44:15.078982949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=623470 slug=vijayavanidigital t=2024-05-29T13:44:15.078940059Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.77702ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Belgrade, country=Serbia, datacenter=Altushost, environment=production, instance=37.46.115.15:9998, ip=37.46.115.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/rs.crt, role=vpn, server=belgrade402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.078852907Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1ccngiutlbst1001.cngi.gcp.hclsw.internal, job=Bastion-Host, mountpoint=/opt" t=2024-05-29T13:44:15.078882977Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1ccngiutlbst1001.cngi.gcp.hclsw.internal, job=Bastion-Host, mountpoint=/opt" t=2024-05-29T13:44:15.078874635Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1ccngiutlbst1001.cngi.gcp.hclsw.internal, job=Bastion-Host, mountpoint=/home" t=2024-05-29T13:44:15.078853326Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1ccngiutlbst1001.cngi.gcp.hclsw.internal, job=Bastion-Host, mountpoint=/export" t=2024-05-29T13:44:15.078817465Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1ccngiutlbst1001.cngi.gcp.hclsw.internal, job=Bastion-Host, mountpoint=/export" t=2024-05-29T13:44:15.078804565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=nuuscn1ccngiutlbst1001.cngi.gcp.hclsw.internal, job=Bastion-Host, mountpoint=/data" t=2024-05-29T13:44:15.078767564Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1fcngiwcadbs1001.cngi.gcp.hclsw.internal, job=Dev-DB-Host, mountpoint=/opt" t=2024-05-29T13:44:15.078673202Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=871095 slug=cmcnginp instance="device=/dev/mapper/vg01-lv01, fstype=ext4, instance=duuscn1fcngiwcadbs1001.cngi.gcp.hclsw.internal, job=Dev-DB-Host, mountpoint=/export" t=2024-05-29T13:44:15.078622441Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:15.078640709Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.078600741Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=507549 slug=coindcx instance= t=2024-05-29T13:44:15.078495086Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=507549 slug=coindcx t=2024-05-29T13:44:15.07845893Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.078410916Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.078467124Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.078389888Z caller=remote_alert_sender.go:94 user=459574 slug=seybai23 host=seybai23-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.61.143:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fZKYHHbVz alerts=1 + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.078423818Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=IuJiRMnVk, ref_id=A" t=2024-05-29T13:44:15.078400166Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=430961 slug=solifi version=4 fingerprint=1f22f4b9de4dc394 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.078289984Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=IuJiRMnVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.077916906s EvaluationString:}]" duration=35.815603ms + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.078288999Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.078268302Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.078281209Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Beijing, country=China, datacenter=M247, environment=production, instance=188.241.80.43:9998, ip=188.241.80.43, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=china407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.078264884Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.078254799Z level=warn msg="Failed to take an image" dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:15.078048983Z caller=remote_instance_store.go:51 user=542352 slug=scytaleai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Beijing, country=China, datacenter=M247, environment=production, instance=188.241.80.43:9998, ip=188.241.80.43, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/china.crt, role=vpn, server=china407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.077797413Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=542352 slug=scytaleai instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.077895015Z level=debug 
msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.07791303Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:15.077734439Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.077692402Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=163513 slug=dialpad t=2024-05-29T13:44:15.077443808Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.077474849Z caller=remote_image_capturer.go:54 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=d758ae0d-c7bd-47ff-bfe9-3b10af13822e dashboard=e25f1663-bd03-4097-b6c3-d0e55a63a225 panel=1 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Beijing, country=China, datacenter=M247, environment=production, instance=188.241.80.29:9998, ip=188.241.80.29, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/china.crt, role=vpn, server=china406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.077429631Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=836694 slug=viiviilxx t=2024-05-29T13:44:15.077442039Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Beijing, country=China, datacenter=M247, environment=production, instance=188.241.80.29:9998, ip=188.241.80.29, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/china.crt, role=vpn, server=china406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.077414539Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.077263227Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=836694 slug=viiviilxx t=2024-05-29T13:44:15.077321818Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=836694 slug=viiviilxx version=8 fingerprint=41403f7e8b4c47da attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.077196437Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=namedprocess_namegroup_num_procs, groupname=cloudflared, instance=pve, job=integrations/process State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, groupname=cloudflared, instance=pve, job=integrations/process Value:0xc0526421d8} C:{Var:C Labels:__name__=namedprocess_namegroup_num_procs, groupname=cloudflared, instance=pve, job=integrations/process Value:0xc052642328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.076476936s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, groupname=cloudflared, instance=pve, job=integrations/process} value=1 ], [ var='C' labels={__name__=namedprocess_namegroup_num_procs, groupname=cloudflared, instance=pve, job=integrations/process} 
value=0 ]}]" duration=18.932312ms + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.077256286Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.077139525Z caller=remote_instance_store.go:51 user=829352 slug=unfnbonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Baltimore, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.32:9998, ip=84.239.43.32, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-baltimore.crt, role=vpn, server=baltimore402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.077244055Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.077094133Z caller=remote_image_capturer.go:33 user=829352 slug=unfnbonp rule_org_id=1 rule_uid=ddcf7555-8727-44a4-a6b3-57cac8369d17 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager.persist user=829352 slug=unfnbonp t=2024-05-29T13:44:15.077101083Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1 + logger=ngalert.state.manager user=829352 slug=unfnbonp instance="container=unica-platform, namespace=fnbo-np-uni, pod=hcl-unica-platform-5df7fbbfb9-lrb77" t=2024-05-29T13:44:15.077084942Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:15.077024243Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=829352 slug=unfnbonp instance="container=unica-offer, namespace=fnbo-np-uni, pod=hcl-unica-offer-5f5cfb9b6-b2rhz" t=2024-05-29T13:44:15.077062232Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:15.07703593Z caller=remote_image_capturer.go:33 user=829352 slug=unfnbonp rule_org_id=1 rule_uid=ddcf7555-8727-44a4-a6b3-57cac8369d17 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=829352 slug=unfnbonp instance="container=unica-campaign, namespace=fnbo-np-uni, pod=hcl-unica-campaign-76998c6f86-5wc64" t=2024-05-29T13:44:15.077018299Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Baltimore, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.32:9998, ip=84.239.43.32, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=baltimore402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.077055994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Baltimore, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.32:9998, ip=84.239.43.32, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=baltimore402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.077047841Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=807171 slug=unstarnp t=2024-05-29T13:44:15.076857392Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.928496ms + level=debug ts=2024-05-29T13:44:15.076972543Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.076872065Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=274199 slug=telemetriahgm instance= t=2024-05-29T13:44:15.076867195Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.076719485Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=node-exporter, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-prometheus-node-exporter-l6b8g, workload=grafana-k8s-monitoring-prometheus-node" t=2024-05-29T13:44:15.076865982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=274199 slug=telemetriahgm t=2024-05-29T13:44:15.076821024Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.076801505Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Baltimore, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.1:9998, ip=84.239.43.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-baltimore.crt, role=vpn, server=baltimore401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.076831642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=node-exporter, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-prometheus-node-exporter-l6b8g, workload=grafana-k8s-monitoring-prometheus-node" t=2024-05-29T13:44:15.076847861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Baltimore, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.1:9998, ip=84.239.43.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-baltimore.crt, role=vpn, server=baltimore401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.076817202Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.076776531Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.076694082Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.076771555Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.076673546Z caller=remote_instance_store.go:51 user=788179 slug=krea msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=788179 slug=krea instance= t=2024-05-29T13:44:15.076602984Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.076588115Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Baltimore, country=United States, datacenter=DataPacket, environment=production, instance=84.239.43.1:9998, ip=84.239.43.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=baltimore401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.07660792Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.076488801Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=788179 slug=krea version=20 fingerprint=3aeff49eeb9418a0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.076498532Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc013233200} B:{Var:B Labels: Value:0xc013233208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.07608273s EvaluationString:[ var='A' labels={} value=3.57 ], [ var='B' labels={} value=0 ]}]" duration=11.781448ms + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.076410247Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.99:9998, ip=5.182.110.99, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.076204306Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=node-cache, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=node-local-dns-d9vtx, workload=node-local" t=2024-05-29T13:44:15.076151997Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.076100145Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=node-cache, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=node-local-dns-2mf68, workload=node-local" t=2024-05-29T13:44:15.075977064Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.67:9998, ip=5.182.110.67, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-atlanta.crt, role=vpn, server=atlanta421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.075999604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=node-cache, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=node-local-dns-2mf68, workload=node-local" t=2024-05-29T13:44:15.075952593Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.075791384Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.67:9998, ip=5.182.110.67, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.075813624Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.67:9998, ip=5.182.110.67, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.075803269Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=netd-metrics-collector, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=netd-v6ccv" t=2024-05-29T13:44:15.075708349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.35:9998, ip=5.182.110.35, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-atlanta.crt, role=vpn, server=atlanta420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.075599657Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.075449984Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.075533097Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=netd-metrics-collector, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=netd-sxmh5" t=2024-05-29T13:44:15.075499625Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.075275174Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.075104338Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.2:9998, ip=5.182.110.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-atlanta.crt, role=vpn, server=atlanta419, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.075138576Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=netd-metrics-collector, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=netd-nwtd4" t=2024-05-29T13:44:15.075174488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.074985511Z level=debug msg="Changing state" previous_state=Normal next_state=Error previous_ends_at=2024-05-29T13:43:00Z next_ends_at=2024-05-29T13:48:00Z + logger=ngalert.state.manager.persist user=735588 slug=srepradnya t=2024-05-29T13:44:15.074889301Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.353607ms + level=debug ts=2024-05-29T13:44:15.074918555Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.074835605Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=netd, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=netd-v6ccv" t=2024-05-29T13:44:15.074808421Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=755975 slug=franprd t=2024-05-29T13:44:15.074651366Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.577631ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.225:9998, ip=5.182.110.225, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-atlanta.crt, role=vpn, server=atlanta436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.074698954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.074649706Z level=warn 
msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.074579553Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=netd, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=netd-sxmh5" t=2024-05-29T13:44:15.074571356Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.225:9998, ip=5.182.110.225, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta436, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.074467726Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.07429109Z caller=remote_instance_store.go:51 user=713314 slug=tpceunonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.074341982Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=43.802853ms + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.074320652Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=713314 slug=tpceunonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.074209019Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.074114423Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=470503 slug=javatgbot t=2024-05-29T13:44:15.074138999Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.597279ms + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.074135351Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager.persist user=224047 slug=ppbtradingtribeprd t=2024-05-29T13:44:15.074009982Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=317.280432ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.195:9998, ip=5.182.110.195, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.074038289Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.074012454Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.074018129Z 
caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.073868397Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.073876915Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.073632146Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.163:9998, ip=5.182.110.163, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.073636416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=397201 slug=zultys t=2024-05-29T13:44:15.073521152Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=397201 slug=zultys t=2024-05-29T13:44:15.073474697Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.131:9998, ip=5.182.110.131, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-atlanta.crt, role=vpn, server=atlanta423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.073455855Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.131:9998, ip=5.182.110.131, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-atlanta.crt, role=vpn, server=atlanta423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.073443387Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.073412815Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=kubedns, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-8486cd48fc-jjx98, workload=kube-dns" t=2024-05-29T13:44:15.073309562Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=5.182.110.131:9998, ip=5.182.110.131, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.073269259Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=181.214.167.99:9998, ip=181.214.167.99, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.072844094Z level=debug msg="Setting next state" handler=resultNormal + level=debug component=discovery ts=2024-05-29T13:44:15.072490147Z caller=retry.go:58 user=470734 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=4 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=181.214.167.67:9998, ip=181.214.167.67, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.072463508Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=475816 slug=lightninglabs t=2024-05-29T13:44:15.072290613Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=475816 slug=lightninglabs instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.072235992Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=475816 slug=lightninglabs t=2024-05-29T13:44:15.072222599Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.072076877Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.072008757Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071855857Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.071974372Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError 
t=2024-05-29T13:44:15.071848447Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.071964302Z caller=remote_instance_store.go:51 user=162543 slug=rapharacing msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071824257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=181.214.167.2:9998, ip=181.214.167.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/us-atlanta.crt, role=vpn, server=atlanta411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.071926884Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071799627Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=162543 slug=rapharacing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.071887993Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=162543 slug=rapharacing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.071880245Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=162543 slug=rapharacing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.071856715Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071782006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071775706Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=162543 slug=rapharacing t=2024-05-29T13:44:15.071842107Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=672418 slug=streamkap instance= t=2024-05-29T13:44:15.071769196Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071760966Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071754706Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071735296Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071710806Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager.persist user=620066 slug=xiaodi22013 t=2024-05-29T13:44:15.071848159Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=620066 slug=xiaodi22013 instance="agent_hostname=iZbp1hyp5omrs56ehvdo1uZ, cpu=1, instance=iZbp1hyp5omrs56ehvdo1uZ:12345, 
job=integrations/node_exporter, mode=idle" t=2024-05-29T13:44:15.071838608Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.071840227Z caller=remote_instance_store.go:51 user=776563 slug=eagleeye4els msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Atlanta, country=United States, datacenter=PIA, environment=production, instance=181.214.167.2:9998, ip=181.214.167.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=atlanta411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.071809819Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071692586Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071685425Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=672418 slug=streamkap instance= t=2024-05-29T13:44:15.071678725Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071669265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071663165Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=776563 slug=eagleeye4els instance="city=Tokyo" t=2024-05-29T13:44:15.071759385Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071647395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=776563 slug=eagleeye4els instance="city=Shenzhen" t=2024-05-29T13:44:15.071720334Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071639825Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager.persist user=214309 slug=spenmo t=2024-05-29T13:44:15.071696324Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.461993ms + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071625205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071618635Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=776563 slug=eagleeye4els instance="city=Shanghai" t=2024-05-29T13:44:15.071692324Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071603445Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=672418 slug=streamkap instance= previous_handler=resultError t=2024-05-29T13:44:15.071595305Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug 
+ [Raw Grafana ngalert debug-log sample, captured 2024-05-29T13:44:15Z: interleaved logger=ngalert.state.manager "Setting next state" / "Keeping state" / "Execution keep last state" entries (predominantly user=672418 slug=streamkap), logger=ngalert.scheduler "Alert rule evaluated" results, logger=ngalert.state.manager.persist "Saving alert states" entries, and caller=remote_instance_store.go:51 "calling SaveAlertInstance" entries across many tenant user/slug pairs]
Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.bluetoothUnlock Value:0xc0959e1f90} D:{Var:D Labels:queue=two.jyn.bluetoothUnlock Value:0xc0959e1fd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065591537s EvaluationString:[ var='B' labels={queue=two.jyn.bluetoothUnlock} value=0 ], [ var='D' labels={queue=two.jyn.bluetoothUnlock} value=0 ]} {Instance:queue=two.jyn.cellularEvent State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.cellularEvent Value:0xc0288dac50} D:{Var:D Labels:queue=two.jyn.cellularEvent Value:0xc0288dac60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065597108s EvaluationString:[ var='B' labels={queue=two.jyn.cellularEvent} value=0 ], [ var='D' labels={queue=two.jyn.cellularEvent} value=0 ]} {Instance:queue=two.jyn.clearMessage State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.clearMessage Value:0xc0288dac90} D:{Var:D Labels:queue=two.jyn.clearMessage Value:0xc0288daea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065603088s EvaluationString:[ var='B' labels={queue=two.jyn.clearMessage} value=1 ], [ var='D' labels={queue=two.jyn.clearMessage} value=0 ]} {Instance:queue=two.jyn.clearWarning State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.clearWarning Value:0xc0288db0f0} D:{Var:D Labels:queue=two.jyn.clearWarning Value:0xc0288db180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065611878s EvaluationString:[ var='B' labels={queue=two.jyn.clearWarning} value=0 ], [ var='D' labels={queue=two.jyn.clearWarning} value=0 ]} {Instance:queue=two.jyn.doorbellPress State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.doorbellPress Value:0xc0288db400} D:{Var:D Labels:queue=two.jyn.doorbellPress Value:0xc0288db3f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065618128s EvaluationString:[ var='B' labels={queue=two.jyn.doorbellPress} value=0 ], [ var='D' labels={queue=two.jyn.doorbellPress} value=0 ]} {Instance:queue=two.jyn.downloadedFile State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.downloadedFile Value:0xc0288db420} D:{Var:D Labels:queue=two.jyn.downloadedFile Value:0xc0288db430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065627878s EvaluationString:[ var='B' labels={queue=two.jyn.downloadedFile} value=0 ], [ var='D' labels={queue=two.jyn.downloadedFile} value=0 ]} {Instance:queue=two.jyn.radioSettingsUpdate State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.radioSettingsUpdate Value:0xc0288db450} D:{Var:D Labels:queue=two.jyn.radioSettingsUpdate Value:0xc0288db460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065635928s EvaluationString:[ var='B' labels={queue=two.jyn.radioSettingsUpdate} value=0 ], [ var='D' labels={queue=two.jyn.radioSettingsUpdate} value=0 ]} {Instance:queue=two.jyn.requestFwUpdate State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.requestFwUpdate Value:0xc0288db480} D:{Var:D Labels:queue=two.jyn.requestFwUpdate Value:0xc0288db4a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065643688s EvaluationString:[ var='B' labels={queue=two.jyn.requestFwUpdate} value=0 ], [ var='D' labels={queue=two.jyn.requestFwUpdate} value=0 ]} {Instance:queue=two.jyn.sensorStatusChange State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.sensorStatusChange Value:0xc0288db4c0} D:{Var:D Labels:queue=two.jyn.sensorStatusChange 
Value:0xc0288db4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065649988s EvaluationString:[ var='B' labels={queue=two.jyn.sensorStatusChange} value=0 ], [ var='D' labels={queue=two.jyn.sensorStatusChange} value=0 ]} {Instance:queue=two.jyn.stateChange State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:queue=two.jyn.stateChange Value:0xc02baec010} D:{Var:D Labels:queue=two.jyn.stateChange Value:0xc02baec020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.065655988s EvaluationString:[ var='B' labels={queue=two.jyn.stateChange} value=4 ], [ var='D' labels={queue=two.jyn.stateChange} value=0 ]}]" duration=27.794603ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Anchorage, country=United States, datacenter=DataPacket, environment=production, instance=84.239.52.1:9998, ip=84.239.52.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=alaska402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.066393023Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.066374353Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.066244257Z caller=remote_instance_store.go:51 user=459574 slug=seybai23 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=195.78.54.6:9998, ip=195.78.54.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.066245212Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.066176583Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.066181526Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=459574 slug=seybai23 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.0661539Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=195.78.54.6:9998, ip=195.78.54.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam432, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.06603762Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.06582316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, 
environment=production, instance=195.78.54.5:9998, ip=195.78.54.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.06568287Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.065614603Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.065601213Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=704123 slug=sportsmanwarehouse t=2024-05-29T13:44:15.065160074Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=704123 slug=sportsmanwarehouse instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.065112134Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.065262887Z caller=remote_instance_store.go:51 user=851297 slug=roadrunneruat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=195.78.54.3:9998, ip=195.78.54.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.06546345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=default-http-backend, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=l7-default-backend-745c798fdd-g4k7r, workload=l7-default-backend" t=2024-05-29T13:44:15.065462308Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.06543745Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=704123 slug=sportsmanwarehouse version=1 fingerprint=4813657851593ab8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.064971621Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.06478675s EvaluationString:}]" duration=7.429117ms + logger=ngalert.state.manager.persist user=63636 slug=streamelements t=2024-05-29T13:44:15.065347802Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=63636 slug=streamelements instance="op=handleMsg, source=mad-men-change-feed.deployments, status=failed" t=2024-05-29T13:44:15.065318825Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=195.78.54.3:9998, ip=195.78.54.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.065285264Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.065183885Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=63636 slug=streamelements version=10 fingerprint=ef328e87cdf01459 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.065188229Z level=debug msg="Alert rule evaluated" results="[{Instance:op=handleMsg, source=mad-men-change-feed.deployments, status=failed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:op=handleMsg, source=mad-men-change-feed.deployments, status=failed Value:0xc052879cf0} D:{Var:D Labels:op=handleMsg, source=mad-men-change-feed.deployments, status=failed Value:0xc052879c90} E:{Var:E Labels:op=handleMsg, source=mad-men-change-feed.deployments, status=failed Value:0xc052879cc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.064892982s EvaluationString:[ var='B' labels={op=handleMsg, source=mad-men-change-feed.deployments, status=failed} value=0 ], [ var='D' labels={op=handleMsg, source=mad-men-change-feed.deployments, status=failed} value=0 ], [ var='E' labels={op=handleMsg, source=mad-men-change-feed.deployments, status=failed} value=0 ]}]" duration=58.843433ms + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-m7skc, workload=pdcsi" t=2024-05-29T13:44:15.065226204Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=851297 slug=roadrunneruat version=1 fingerprint=74d7d3b259594e45 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.065096654Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.064736218s EvaluationString:}]" duration=6.975249ms + logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance= t=2024-05-29T13:44:15.065091391Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=191.96.168.6:9998, ip=191.96.168.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.065071773Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:15.065024281Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=692010 slug=mercariusprod version=2 fingerprint=1ec5b76bef4763d9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.064754429Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.064365373s EvaluationString:}]" duration=43.376572ms + logger=ngalert.state.manager user=403369 slug=clearsaletechlabs t=2024-05-29T13:44:15.064884306Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.064850193Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=403369 slug=clearsaletechlabs version=335 fingerprint=171a6cd3c9996fca attempt=1 now=2024-05-29T13:44:00Z t=2024-05-29T13:44:15.064794046Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc05cda3d78} C:{Var:C Labels: Value:0xc05cda3d90}] EvaluatedAt:2024-05-29 13:44:00 +0000 UTC EvaluationDuration:15.064417637s EvaluationString:[ var='B' labels={} value=1110 ], [ var='C' labels={} value=0 ]}]" duration=5.892473138s + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=191.96.168.5:9998, ip=191.96.168.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.064749194Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=191.96.168.5:9998, ip=191.96.168.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.064735567Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-8f8x5, workload=pdcsi" t=2024-05-29T13:44:15.064712884Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=735588 slug=srepradnya instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.064517713Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=735588 slug=srepradnya instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.064494283Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=191.96.168.5:9998, ip=191.96.168.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.06456793Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.064525664Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=191.96.168.3:9998, ip=191.96.168.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.064374751Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy" t=2024-05-29T13:44:15.064327897Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=405934 slug=parasrandhawa t=2024-05-29T13:44:15.064263205Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.064185806Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=405934 slug=parasrandhawa instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.064249838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=405934 slug=parasrandhawa instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.06423249Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.064043809Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, 
namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy" t=2024-05-29T13:44:15.064128973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=191.96.168.3:9998, ip=191.96.168.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.064115824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.06398524Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063972481Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.064051475Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=191.96.168.3:9998, ip=191.96.168.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.064104847Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=755975 slug=franprd instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.064042936Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=755975 slug=franprd instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.064032235Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=396773 slug=aninditadass version=115 fingerprint=ca7d2883e69b49a6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.063909839Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.063622977s EvaluationString:}]" duration=12.485097ms + logger=ngalert.state.manager user=452240 slug=trulioo instance= t=2024-05-29T13:44:15.063985257Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=452240 slug=trulioo version=91 fingerprint=808c841d6fb20904 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.063889604Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[A0:{Var:A Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.063600277s EvaluationString:[ var='A0' metric='NoData' labels={} value=null ]}]" duration=48.471185ms + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events" t=2024-05-29T13:44:15.063925038Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063876754Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events" t=2024-05-29T13:44:15.063872637Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063833388Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063824495Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063804241Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.6:9998, ip=181.214.206.6, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam435, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.063876316Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.063833944Z caller=remote_instance_store.go:51 user=744689 slug=prashantkumarsi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.063804803Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=744689 slug=prashantkumarsi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063769482Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=744689 slug=prashantkumarsi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063754442Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:15.063688779Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063714455Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063706142Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=744689 slug=prashantkumarsi version=1 fingerprint=996f9389c2137f59 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.06366558Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.063274781s EvaluationString:}]" duration=8.699346ms + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063665968Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063578874Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063555019Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.06358329Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063540575Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.063568634Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063505913Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.06349744Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063402943Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.5:9998, ip=181.214.206.5, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.063424509Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.063406145Z caller=remote_instance_store.go:51 user=731546 slug=liderbci msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063306953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063282813Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063115886Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063088821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063077829Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.063034034Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.063156353Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.063036978Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.4:9998, ip=181.214.206.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.063023497Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.062986407Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=441627 slug=foreststaking instance= t=2024-05-29T13:44:15.062961262Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=441627 slug=foreststaking instance= t=2024-05-29T13:44:15.062948435Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=441627 slug=foreststaking t=2024-05-29T13:44:15.062927946Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=441627 slug=foreststaking version=43 fingerprint=fa3e15b26a38914f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.062876924Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.129782ms + level=debug ts=2024-05-29T13:44:15.062810452Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=error ts=2024-05-29T13:44:15.062846498Z caller=remote_rule_evaluator.go:110 user=441627 slug=foreststaking msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.4:9998, ip=181.214.206.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.06281091Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.4:9998, ip=181.214.206.4, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.062795767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=376364 slug=pn0625prod01 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:15.062621505Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.3:9998, ip=181.214.206.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.062627841Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=376364 slug=pn0625prod01 t=2024-05-29T13:44:15.062596105Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.06254691Z caller=remote_instance_store.go:51 user=695888 slug=boeingdr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=695888 slug=boeingdr instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.062469058Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.062494861Z caller=remote_image_capturer.go:54 user=633335 slug=promqlworkshop rule_org_id=1 rule_uid=b3034a0f-8dda-4687-8605-02454d4a2b00 dashboard=3JvCusVnz panel=26 msg="rendering alert image with grafana" + logger=ngalert.scheduler user=695888 slug=boeingdr version=1 fingerprint=7a77b62f7d347366 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.062362176Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.062233452s EvaluationString:}]" duration=7.221793ms + logger=ngalert.state.manager user=633335 slug=promqlworkshop instance= t=2024-05-29T13:44:15.062412981Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + level=debug ts=2024-05-29T13:44:15.062409194Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:15.062384692Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.3:9998, ip=181.214.206.3, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.062434211Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.2:9998, ip=181.214.206.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.062269511Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.062239721Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.062219128Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.062239242Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.2:9998, ip=181.214.206.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.0621142Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=PIA, environment=production, instance=181.214.206.2:9998, ip=181.214.206.2, is_collocated=true, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.062102798Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.0618909Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.061814979Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=212.102.35.1:9998, ip=212.102.35.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.061810981Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=212.102.35.1:9998, ip=212.102.35.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam438, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.061798059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=212.102.35.130:9998, ip=212.102.35.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.0616472Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.061583036Z caller=remote_instance_store.go:51 user=470503 slug=javatgbot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=470503 slug=javatgbot instance= t=2024-05-29T13:44:15.061520772Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=212.102.35.130:9998, ip=212.102.35.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.06150257Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.061291098Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=154.47.21.129:9998, ip=154.47.21.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.061325897Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.06129886Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.06120318Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:15.061069972Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.591089ms + logger=ngalert.state.manager.persist user=214309 slug=spenmo t=2024-05-29T13:44:15.061230155Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:15.06118949Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=154.47.21.129:9998, ip=154.47.21.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.061158672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=154.47.21.129:9998, ip=154.47.21.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.061144592Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.061114657Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.188:9998, ip=143.244.41.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.060954513Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.188:9998, ip=143.244.41.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.060939947Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.06089186Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=636875 slug=kaisbaccour t=2024-05-29T13:44:15.060807345Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=636875 slug=kaisbaccour instance="contract_version=0.4.0" t=2024-05-29T13:44:15.060796445Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.060710405Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.188:9998, ip=143.244.41.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.060720876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.188:9998, ip=143.244.41.188, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.060706364Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.060634204Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.060672894Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.060584185Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.186:9998, ip=143.244.41.186, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam442, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.060510842Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=818291 slug=g8tamanini t=2024-05-29T13:44:15.060427012Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.060409452Z caller=remote_image_capturer.go:33 user=818291 slug=g8tamanini rule_org_id=1 rule_uid=cddk6njss1s00d msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=818291 slug=g8tamanini instance="id=BCFF4D30C1D0, local=!0 - Manutenção RJ" t=2024-05-29T13:44:15.060387741Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=818291 slug=g8tamanini instance="id=BCFF4D30C1D0, local=!0 - Manutenção RJ" t=2024-05-29T13:44:15.060379851Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.186:9998, ip=143.244.41.186, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam442, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.060269844Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=818291 slug=g8tamanini instance="id=4417930B60AE, local=!0 - Manutenção RJ" t=2024-05-29T13:44:15.060265488Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=818291 slug=g8tamanini instance="id=4417930B60AE, local=!0 - Manutenção RJ" t=2024-05-29T13:44:15.060250799Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=818291 slug=g8tamanini t=2024-05-29T13:44:15.060220587Z level=debug msg="State manager processing evaluation results" resultCount=3 + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy" t=2024-05-29T13:44:15.060240497Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=818291 slug=g8tamanini version=3 fingerprint=cea5b821c1da3253 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.060138086Z level=debug msg="Alert rule evaluated" results="[{Instance:id=4417930B60AE, local=!0 - Manutenção RJ State:Alerting Error: Results:map[] 
Values:map[C:{Var:C Labels:id=4417930B60AE, local=!0 - Manutenção RJ Value:0xc004adb3e0} Status:{Var:Status Labels:id=4417930B60AE, local=!0 - Manutenção RJ Value:0xc004adb410}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.059548642s EvaluationString:[ var='C' labels={id=4417930B60AE, local=!0 - Manutenção RJ} value=1 ], [ var='Status' labels={id=4417930B60AE, local=!0 - Manutenção RJ} value=0 ]} {Instance:id=4417930B9CF9, local=!0 - Manutenção State:Alerting Error: Results:map[] Values:map[C:{Var:C Labels:id=4417930B9CF9, local=!0 - Manutenção Value:0xc004adb450} Status:{Var:Status Labels:id=4417930B9CF9, local=!0 - Manutenção Value:0xc004adb480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.059564843s EvaluationString:[ var='C' labels={id=4417930B9CF9, local=!0 - Manutenção} value=1 ], [ var='Status' labels={id=4417930B9CF9, local=!0 - Manutenção} value=0 ]} {Instance:id=BCFF4D30C1D0, local=!0 - Manutenção RJ State:Alerting Error: Results:map[] Values:map[C:{Var:C Labels:id=BCFF4D30C1D0, local=!0 - Manutenção RJ Value:0xc004adb4c0} Status:{Var:Status Labels:id=BCFF4D30C1D0, local=!0 - Manutenção RJ Value:0xc004adb4f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.059572493s EvaluationString:[ var='C' labels={id=BCFF4D30C1D0, local=!0 - Manutenção RJ} value=1 ], [ var='Status' labels={id=BCFF4D30C1D0, local=!0 - Manutenção RJ} value=0 ]}]" duration=5.17797ms + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy" t=2024-05-29T13:44:15.060031233Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.059975857Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=722851 slug=azarny t=2024-05-29T13:44:15.059863362Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.185:9998, ip=143.244.41.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-amsterdam.crt, role=vpn, server=amsterdam441, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.059819392Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.059561386Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10017" t=2024-05-29T13:44:15.059432686Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10017" t=2024-05-29T13:44:15.059419585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10004" t=2024-05-29T13:44:15.059375885Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.183:9998, ip=143.244.41.183, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=amsterdam439, server_type=10G - Dual CPU, service_name=cpz_vpn" t=2024-05-29T13:44:15.059271944Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10017" t=2024-05-29T13:44:15.059304585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10015" t=2024-05-29T13:44:15.059287885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=platform-prd-eks, container=logi-integration-service, environment=production, namespace=reporting-logi, pod=logi-integration-service-677fd6cd8c-b8scw" t=2024-05-29T13:44:15.059191296Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager.persist user=705083 slug=mediakindsaas t=2024-05-29T13:44:15.059012842Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=912534 slug=useat instance="cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring" t=2024-05-29T13:44:15.059134885Z level=debug msg="Keeping 
state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=k8s-aks-pro-eus-74f7, container=doc-compiler, environment=production, namespace=functionapps, pod=doc-compiler-7d8f79b788-7l7rm" t=2024-05-29T13:44:15.059078356Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10019" t=2024-05-29T13:44:15.059053983Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.059100583Z caller=remote_image_capturer.go:33 user=396586 slug=opengov rule_org_id=1 rule_uid=gCY6iwIVz msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=705083 slug=mediakindsaas t=2024-05-29T13:44:15.058933338Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=705083 slug=mediakindsaas version=65 fingerprint=b1b2c7fa02340625 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.058835231Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=babd0237-43f7-4b06-8fac-9df5d5be23b2, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055441119s EvaluationString:}]" duration=43.99924ms + level=debug ts=2024-05-29T13:44:15.058975344Z caller=remote_image_capturer.go:33 user=396586 slug=opengov rule_org_id=1 rule_uid=gCY6iwIVz msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=396586 slug=opengov instance="cluster=k8s-aks-dev-eus-97ad, container=employee, environment=Development, namespace=sharks, pod=employee-775bf54674-4flmq" t=2024-05-29T13:44:15.05894521Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager.persist user=920307 slug=tedoptimus t=2024-05-29T13:44:15.058958325Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=920307 slug=tedoptimus instance="hostname=s1, job=systemd-journal, level=info, service_name=systemd-journal, unit=ttw-alert.service" t=2024-05-29T13:44:15.058945864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=920307 slug=tedoptimus instance="hostname=s1, job=systemd-journal, level=info, service_name=systemd-journal, unit=ttw-alert.service" t=2024-05-29T13:44:15.058928054Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10015" t=2024-05-29T13:44:15.058856182Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.156:9998, ip=143.244.41.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=amsterdam404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.058869885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, 
environment=production, instance=143.244.41.156:9998, ip=143.244.41.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=streaming-optimized, server=amsterdam404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.058858935Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=739130 slug=redphasetech instance="device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10017" t=2024-05-29T13:44:15.058800682Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov t=2024-05-29T13:44:15.058788746Z level=debug msg="State manager processing evaluation results" resultCount=4 + logger=ngalert.state.manager user=739130 slug=redphasetech t=2024-05-29T13:44:15.058642381Z level=debug msg="State manager processing evaluation results" resultCount=21 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.156:9998, ip=143.244.41.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-netherlands-so.crt, role=streaming-optimized, server=amsterdam404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.0587106Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Amsterdam, country=Netherlands, datacenter=DataPacket, environment=production, instance=143.244.41.156:9998, ip=143.244.41.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/nl-netherlands-so.crt, role=streaming-optimized, server=amsterdam404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.058691887Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=739130 slug=redphasetech version=8 fingerprint=ee71b7510a322b1d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.058377079Z level=debug msg="Alert rule evaluated" results="[{Instance:device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10019 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10019 Value:0xc042a14c20} C:{Var:C Labels:device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10019 Value:0xc042a14ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055376963s EvaluationString:[ var='B' labels={device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10019} value=25.559071819936936 ], [ var='C' labels={device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10019} value=0 ]} {Instance:device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10019 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10019 Value:0xc042a14de0} C:{Var:C Labels:device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10019 Value:0xc042a14e70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055408363s EvaluationString:[ var='B' labels={device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10019} value=33.74092572923342 ], [ var='C' labels={device=sda1, fstype=ext2, 
mode=rw, path=/boot, unit_serial=10019} value=0 ]} {Instance:device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10019 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10019 Value:0xc042a15128} C:{Var:C Labels:device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10019 Value:0xc042a151d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055421363s EvaluationString:[ var='B' labels={device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10019} value=54.581939297556005 ], [ var='C' labels={device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10019} value=0 ]} {Instance:device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10015 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10015 Value:0xc042a153d0} C:{Var:C Labels:device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10015 Value:0xc042a15468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055432163s EvaluationString:[ var='B' labels={device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10015} value=59.86225650910647 ], [ var='C' labels={device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10015} value=0 ]} {Instance:device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10017 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10017 Value:0xc042a156b0} C:{Var:C Labels:device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10017 Value:0xc042a15748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055441863s EvaluationString:[ var='B' labels={device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10017} value=57.42170335027757 ], [ var='C' labels={device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10017} value=0 ]} {Instance:device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10015 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10015 Value:0xc042a15a00} C:{Var:C Labels:device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10015 Value:0xc042a15aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055452763s EvaluationString:[ var='B' labels={device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10015} value=28.031285109325037 ], [ var='C' labels={device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10015} value=0 ]} {Instance:device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10019 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10019 Value:0xc042a15c00} C:{Var:C Labels:device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10019 Value:0xc042a15ed8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055462963s EvaluationString:[ var='B' labels={device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10019} value=57.46418960204891 ], [ var='C' labels={device=dm-0, fstype=ext4, label=sysctrl--vg-root, mode=rw, path=/, unit_serial=10019} value=0 ]} {Instance:device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10015 State:Normal Error: 
Results:map[] Values:map[B:{Var:B Labels:device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10015 Value:0xc00599ffb0} C:{Var:C Labels:device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10015 Value:0xc00599ea90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055473263s EvaluationString:[ var='B' labels={device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10015} value=0.01323343142876948 ], [ var='C' labels={device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10015} value=0 ]} {Instance:device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10004 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10004 Value:0xc04808d458} C:{Var:C Labels:device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10004 Value:0xc04808d3e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055483563s EvaluationString:[ var='B' labels={device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10004} value=0.012603268027399505 ], [ var='C' labels={device=sda7, fstype=ext4, mode=rw, path=/tmp, unit_serial=10004} value=0 ]} {Instance:device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10017 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10017 Value:0xc04808d578} C:{Var:C Labels:device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10017 Value:0xc04808d608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055495063s EvaluationString:[ var='B' labels={device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10017} value=0.014655677781992482 ], [ var='C' labels={device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10017} value=0 ]} {Instance:device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10019 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10019 Value:0xc04808d768} C:{Var:C Labels:device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10019 Value:0xc04808d808}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055507163s EvaluationString:[ var='B' labels={device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10019} value=23.55776088904646 ], [ var='C' labels={device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10019} value=0 ]} {Instance:device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10019 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10019 Value:0xc04808da50} C:{Var:C Labels:device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10019 Value:0xc04808d988}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055519063s EvaluationString:[ var='B' labels={device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10019} value=0.014224628435463292 ], [ var='C' labels={device=dm-6, fstype=ext4, label=sysctrl--vg-config, mode=rw, path=/config, unit_serial=10019} value=0 ]} {Instance:device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10017 State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10017 Value:0xc04808db60} C:{Var:C Labels:device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10017 Value:0xc04808dc10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055530263s EvaluationString:[ var='B' labels={device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10017} value=33.74070627891258 ], [ var='C' labels={device=sda1, fstype=ext2, mode=rw, path=/boot, unit_serial=10017} value=0 ]} {Instance:device=sda9, fstype=ext4, mode=rw, path=/config, unit_serial=10015 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda9, fstype=ext4, mode=rw, path=/config, unit_serial=10015 Value:0xc04808dde0} C:{Var:C Labels:device=sda9, fstype=ext4, mode=rw, path=/config, unit_serial=10015 Value:0xc04808dd38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055539563s EvaluationString:[ var='B' labels={device=sda9, fstype=ext4, mode=rw, path=/config, unit_serial=10015} value=0.007895927408684666 ], [ var='C' labels={device=sda9, fstype=ext4, mode=rw, path=/config, unit_serial=10015} value=0 ]} {Instance:device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10017 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10017 Value:0xc04808dfd0} C:{Var:C Labels:device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10017 Value:0xc01c346188}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055566263s EvaluationString:[ var='B' labels={device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10017} value=16.47809036402378 ], [ var='C' labels={device=dm-4, fstype=ext4, label=sysctrl--vg-home, mode=rw, path=/home, unit_serial=10017} value=0 ]} {Instance:device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10015 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10015 Value:0xc01c3462c0} C:{Var:C Labels:device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10015 Value:0xc01c346310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055577163s EvaluationString:[ var='B' labels={device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10015} value=45.17649584908065 ], [ var='C' labels={device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10015} value=0 ]} {Instance:device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10017 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10017 Value:0xc01c3466c8} C:{Var:C Labels:device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10017 Value:0xc01c346660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055607063s EvaluationString:[ var='B' labels={device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10017} value=0.012752614285928615 ], [ var='C' labels={device=dm-3, fstype=ext4, label=sysctrl--vg-tmp, mode=rw, path=/tmp, unit_serial=10017} value=0 ]} {Instance:device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10004 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10004 Value:0xc01c346780} C:{Var:C Labels:device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10004 Value:0xc01c3467d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.055618663s EvaluationString:[ var='B' labels={device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10004} value=45.8440482241379 ], [ var='C' labels={device=sda1, fstype=ext4, mode=rw, path=/, unit_serial=10004} value=0 ]} {Instance:device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10004 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10004 Value:0xc01c3468a0} C:{Var:C Labels:device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10004 Value:0xc01c346908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055627963s EvaluationString:[ var='B' labels={device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10004} value=26.63862295903484 ], [ var='C' labels={device=sda8, fstype=ext4, mode=rw, path=/home, unit_serial=10004} value=0 ]} {Instance:device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10017 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10017 Value:0xc01c346a20} C:{Var:C Labels:device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10017 Value:0xc01c346aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055644563s EvaluationString:[ var='B' labels={device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10017} value=25.357052479535056 ], [ var='C' labels={device=dm-1, fstype=ext4, label=sysctrl--vg-var, mode=rw, path=/var, unit_serial=10017} value=0 ]} {Instance:device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10004 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10004 Value:0xc01c346b70} C:{Var:C Labels:device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10004 Value:0xc01c346bc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.055672263s EvaluationString:[ var='B' labels={device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10004} value=21.714234327664457 ], [ var='C' labels={device=sda5, fstype=ext4, mode=rw, path=/var, unit_serial=10004} value=0 ]}]" duration=39.479836ms + logger=ngalert.scheduler user=912534 slug=useat version=6 fingerprint=1dfc62be6d160512 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.057157236Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring Value:0xc001e73510} B:{Var:B Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring Value:0xc001e73578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.048970186s EvaluationString:[ var='A' 
labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events Value:0xc001e73648} B:{Var:B Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events Value:0xc001e736b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.048982216s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy Value:0xc001e737e0} B:{Var:B Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy Value:0xc001e73780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.048990826s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy Value:0xc001e738a8} B:{Var:B Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy Value:0xc001e73908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.048997746s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy Value:0xc001e739e0} B:{Var:B Labels:cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy Value:0xc001e73a50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.049005157s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=alloy, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-autoscaler-77f84b86b7-p9x85, workload=konnectivity-agent-autoscaler State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-autoscaler-77f84b86b7-p9x85, workload=konnectivity-agent-autoscaler Value:0xc001e73b58} B:{Var:B Labels:cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-autoscaler-77f84b86b7-p9x85, workload=konnectivity-agent-autoscaler Value:0xc001e73be0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049012598s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-autoscaler-77f84b86b7-p9x85, workload=konnectivity-agent-autoscaler} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-autoscaler-77f84b86b7-p9x85, workload=konnectivity-agent-autoscaler} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-autoscaler-79b96f5cb-bcnvx, workload=kube-dns-autoscaler State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-autoscaler-79b96f5cb-bcnvx, workload=kube-dns-autoscaler Value:0xc001e73d18} B:{Var:B Labels:cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-autoscaler-79b96f5cb-bcnvx, workload=kube-dns-autoscaler Value:0xc001e73d98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:5.049018368s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-autoscaler-79b96f5cb-bcnvx, workload=kube-dns-autoscaler} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=autoscaler, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-autoscaler-79b96f5cb-bcnvx, workload=kube-dns-autoscaler} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring Value:0xc001e73e90} B:{Var:B Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring Value:0xc001e73f10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049024158s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-0, workload=grafana-k8s-monitoring} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events Value:0xc012f46008} B:{Var:B Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events Value:0xc012f46118}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049031478s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-events-f7cfbcddf-g7g64, workload=grafana-k8s-monitoring-alloy-events} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy Value:0xc012f46568} B:{Var:B Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy Value:0xc012f462d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049038777s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-6kds9, workload=grafana-k8s-monitoring-alloy} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy Value:0xc012f46740} B:{Var:B Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, 
namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy Value:0xc012f46800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049047608s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-lfv59, workload=grafana-k8s-monitoring-alloy} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy Value:0xc012f46c00} B:{Var:B Labels:cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy Value:0xc012f46ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049054918s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=config-reloader, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=monitoring-system, pod=grafana-k8s-monitoring-alloy-logs-m657m, workload=grafana-k8s-monitoring-alloy} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-8f8x5, workload=pdcsi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-8f8x5, workload=pdcsi Value:0xc012f472b0} B:{Var:B Labels:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-8f8x5, workload=pdcsi Value:0xc012f473d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049060839s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-8f8x5, workload=pdcsi} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-8f8x5, workload=pdcsi} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-9q8k4, workload=pdcsi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-9q8k4, workload=pdcsi Value:0xc012f47628} B:{Var:B Labels:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-9q8k4, workload=pdcsi Value:0xc012f476d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049068959s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-9q8k4, workload=pdcsi} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-9q8k4, workload=pdcsi} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-m7skc, workload=pdcsi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-m7skc, workload=pdcsi Value:0xc012f47870} B:{Var:B Labels:cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-m7skc, workload=pdcsi Value:0xc012f47a68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049075468s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, 
instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-m7skc, workload=pdcsi} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=csi-driver-registrar, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=pdcsi-node-m7skc, workload=pdcsi} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=default-http-backend, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=l7-default-backend-745c798fdd-g4k7r, workload=l7-default-backend State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=default-http-backend, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=l7-default-backend-745c798fdd-g4k7r, workload=l7-default-backend Value:0xc012f47c50} B:{Var:B Labels:cluster=consumer-k8s-dev, container=default-http-backend, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=l7-default-backend-745c798fdd-g4k7r, workload=l7-default-backend Value:0xc012f47b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049082329s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=default-http-backend, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=l7-default-backend-745c798fdd-g4k7r, workload=l7-default-backend} value=0 ], [ var='B' labels={cluster=consumer-k8s-dev, container=default-http-backend, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=l7-default-backend-745c798fdd-g4k7r, workload=l7-default-backend} value=0 ]} {Instance:cluster=consumer-k8s-dev, container=dnsmasq, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-8486cd48fc-jjx98, workload=kube-dns State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=consumer-k8s-dev, container=dnsmasq, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-8486cd48fc-jjx98, workload=kube-dns Value:0xc012f47d48} B:{Var:B Labels:cluster=consumer-k8s-dev, container=dnsmasq, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-8486cd48fc-jjx98, workload=kube-dns Value:0xc012f47e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049088898s EvaluationString:[ var='A' labels={cluster=consumer-k8s-dev, container=dnsmasq, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-8486cd48fc-jjx98, workload=kube-dns} value=0 ], [ var='B' 
labels={cluster=consumer-k8s-dev, container=dnsmasq, environment=dev, instance=grafana-k8s-monitoring-kube-state-metrics.monitoring-system.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=kube-system, pod=kube-dns-8486cd48fc-jjx98, workload=kube-dns} value=0 ]}
[… identical {Instance: … State:Normal} entries elided for the remaining kube-system and monitoring-system workloads (dnsmasq, gce-pd-driver, gke-metadata-server, konnectivity-agent, kube-proxy, kube-state-metrics, kubedns, metrics-server, netd, node-cache, node-exporter, sidecar); every var='A' and var='B' result reports value=0 …]]" duration=20.815497ms
+ logger=ngalert.scheduler user=849729 slug=medopsimscare t=2024-05-29T13:44:15.058145553Z level=debug msg="Skip rule evaluation because it is paused"
+ level=debug ts=2024-05-29T13:44:15.058234047Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=Ta6tIPbnz, ref_id=A" t=2024-05-29T13:44:15.058074627Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=Ta6tIPbnz, ref_id=A" t=2024-05-29T13:44:15.058056685Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=295631 slug=dapvizor t=2024-05-29T13:44:15.058035111Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=295631 slug=dapvizor version=76 fingerprint=a928c7616c37898c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.057944255Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=Ta6tIPbnz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.057515853s EvaluationString:}]" duration=34.947036ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.058059047Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.058016631Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.058012224Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.05795139Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.057944802Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=633335 slug=promqlworkshop t=2024-05-29T13:44:15.057934049Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=PIA, city=Algiers, country=Algeria, datacenter=M247, environment=production, instance=176.125.228.27:9998, ip=176.125.228.27, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/server_rsa4096v2.crt, role=vpn, server=algiers405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.057671138Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:15.057123724Z caller=remote_alert_sender.go:94 user=542352 slug=scytaleai host=scytaleai-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.148.180:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c1a69336-61ae-4c9a-87c4-de76a203003f alerts=1
+ logger=ngalert.state.manager.persist user=707603 slug=canoneurope t=2024-05-29T13:44:15.057024297Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=20.043485ms
+ logger=ngalert.state.manager user=381989 slug=vanoordacf instance="id=231235, name=vanoordacf-logs" t=2024-05-29T13:44:15.055964059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:15.055707196Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=432323 slug=lithic instance="TableName=vban_v1_live" t=2024-05-29T13:44:15.055689473Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=64f767ec60638ddb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.054798928Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.054384699s EvaluationString:}]" duration=2.021964151s
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:15.054966291Z level=debug msg="Setting next state" handler=resultNormal
[… repeated entries of the same shape elided: "calling SaveAlertInstance" debug lines for techcyte, nordictrustee, accelbyte, jostens, hydrolix, microstrategy, vespaai, vectorizedio, euid, bizagi and expressvpn; recurring expressvpn warnings that reserved labels (labels="service_name=cpz_vpn") will be ignored; and certificate_days_left state transitions, all state=Normal, for expressvpn servers in Algiers, Adelaide, Abu Dhabi, Abuja and Albuquerque …]
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.053909713Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.053707771Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:15.053472424Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Zurich, cluster=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.52.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=zurich-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.05308532Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.052620897Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.historian backend=loki user=190917 slug=d1cx t=2024-05-29T13:44:15.052616865Z level=debug msg="Done saving alert state history batch" + level=debug ts=2024-05-29T13:44:15.052560956Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.052493339Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Zurich, cluster=Zurich, country=Switzerland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.52.31, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=zurich-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.052320832Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.052202596Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Zagreb, cluster=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.29.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=zagreb-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.052133557Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.052048335Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Zagreb, cluster=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.29.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=zagreb-s403, 
server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.051973132Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=774110 slug=nvidiapoc t=2024-05-29T13:44:15.051907241Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.946846ms + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=vpn.gcp.roman.sh, device=/dev/sda15, fstype=vfat, instance=vpn.gcp.roman.sh, job=integrations/node_exporter, mountpoint=/boot/efi" t=2024-05-29T13:44:15.051865662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=vpn.gcp.roman.sh, device=/dev/sda1, fstype=ext4, instance=vpn.gcp.roman.sh, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.051751439Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.051747981Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Zagreb, cluster=Zagreb, country=Croatia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.29.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=zagreb-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.051606629Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.051556038Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.051375132Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/var/folder2ram/var/spool" t=2024-05-29T13:44:15.05140554Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.051382728Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/var/folder2ram/var/log" t=2024-05-29T13:44:15.051257569Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.051247601Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/var/folder2ram/var/log" t=2024-05-29T13:44:15.051241594Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.051208709Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/var/folder2ram/var/lib/rrdcached" 
t=2024-05-29T13:44:15.051127623Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/var/folder2ram/var/lib/rrdcached" t=2024-05-29T13:44:15.051112175Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/var/folder2ram/var/lib/openmediavault/rrd" t=2024-05-29T13:44:15.051016727Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.050992095Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.050940435Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Yerevan, cluster=Yerevan, country=Armenia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.253.160.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=yerevan-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.05097757Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.050933332Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Yerevan, cluster=Yerevan, country=Armenia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.253.160.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=yerevan-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.050963846Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.050932551Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.0508318Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Yerevan, cluster=Yerevan, country=Armenia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.253.160.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=yerevan-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.050826613Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/var/folder2ram/var/lib/monit" t=2024-05-29T13:44:15.050846511Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.05073412Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Yerevan, cluster=Yerevan, country=Armenia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.253.160.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=yerevan-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.050817327Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.050558656Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.050553769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:15.05053281Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:15.050497338Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi4, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi4.home.lan, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.050500608Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:15.050501467Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:15.050474146Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.050403993Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.18.24.62, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=washington-s421, server_type=10G, 
service_name=cpz_vpn" t=2024-05-29T13:44:15.050303092Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.050189545Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.49.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=washington-s406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.050083368Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.05002876Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=rpi3, device=/dev/mmcblk0p2, fstype=ext4, instance=rpi3.homelab.lan, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.050032401Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.049858946Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.49.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=washington-s406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.049909247Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.049852591Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.049767327Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:15.049781711Z caller=remote_alert_sender.go:94 user=698103 slug=vericast host=vericast-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.245.254:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adfnr97ryyih0e alerts=1 + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=omv, device=/dev/sde1, fstype=ext4, instance=omv.homelab.lan, job=integrations/node_exporter, mountpoint=/mnt/DATA2_Backup" t=2024-05-29T13:44:15.049739862Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.49.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=washington-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.049707299Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=152504 slug=andrewbramble t=2024-05-29T13:44:15.049611204Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=663164 slug=mariandima instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.049624835Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:15.049617223Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.015525ms + level=debug ts=2024-05-29T13:44:15.049532519Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=663164 slug=mariandima version=2 fingerprint=98e8def1b3ad6aa4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.049504233Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.049122131s EvaluationString:}]" duration=9.006913ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.49.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=washington-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.04956079Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.049527578Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.04932454Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:15.049325827Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.432558ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.49.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=washington-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.0493431Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.049265932Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.049180823Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=omv, device=/dev/sdb, fstype=btrfs, instance=omv.home.lan, job=integrations/node_exporter, mountpoint=/srv/dev-disk-by-uuid-ca53ab0c-bb29-4b7c-8f9d-6245ed50c498" t=2024-05-29T13:44:15.04923075Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=omv, device=/dev/sdb, fstype=btrfs, instance=omv.home.lan, job=integrations/node_exporter, mountpoint=/srv/dev-disk-by-uuid-ca53ab0c-bb29-4b7c-8f9d-6245ed50c498" t=2024-05-29T13:44:15.049215594Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.49.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=washington-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.049140756Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=893048 slug=graulv t=2024-05-29T13:44:15.049184731Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.04910809Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=893048 slug=graulv instance="__name__=node_systemd_unit_state, agent_hostname=ip-172-31-7-251, instance=dzikie, job=integrations/node_exporter, name=mariadb.service, state=active, type=notify" t=2024-05-29T13:44:15.04914173Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=omv, device=/dev/sda1, fstype=ext4, instance=omv.homelab.lan, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.049098091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893048 slug=graulv t=2024-05-29T13:44:15.049087499Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=893048 slug=graulv version=3 fingerprint=f9ca439ed887421c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.048962067Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=node_systemd_unit_state, agent_hostname=ip-172-31-7-251, instance=dzikie, job=integrations/node_exporter, name=mariadb.service, state=active, type=notify State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=node_systemd_unit_state, agent_hostname=ip-172-31-7-251, instance=dzikie, job=integrations/node_exporter, name=mariadb.service, state=active, type=notify Value:0xc0717be310} C:{Var:C Labels:__name__=node_systemd_unit_state, agent_hostname=ip-172-31-7-251, instance=dzikie, job=integrations/node_exporter, name=mariadb.service, state=active, type=notify Value:0xc0717be2a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.048027921s EvaluationString:[ var='A' labels={__name__=node_systemd_unit_state, agent_hostname=ip-172-31-7-251, instance=dzikie, job=integrations/node_exporter, name=mariadb.service, state=active, type=notify} value=1 ], [ var='C' labels={__name__=node_systemd_unit_state, agent_hostname=ip-172-31-7-251, instance=dzikie, job=integrations/node_exporter, name=mariadb.service, state=active, type=notify} value=0 ]}]" duration=10.718529ms + logger=ngalert.state.manager.persist user=692010 slug=mercariusprod t=2024-05-29T13:44:15.048961392Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.519883ms + logger=ngalert.state.manager user=818804 slug=dbgov instance="instance=172.18.1.3:9100" t=2024-05-29T13:44:15.048982646Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=818804 slug=dbgov t=2024-05-29T13:44:15.048842895Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.165.48.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=washington-s499, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.048928717Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.04889428Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.665026ms + level=debug ts=2024-05-29T13:44:15.048885589Z 
caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=818804 slug=dbgov version=18 fingerprint=eb0c013a6e43533b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.048740533Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=172.18.1.3:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=172.18.1.3:9100 Value:0xc0045eded0} B:{Var:B Labels:instance=172.18.1.3:9100 Value:0xc0045edf10} C:{Var:C Labels:instance=172.18.1.3:9100 Value:0xc0045ede90} D:{Var:D Labels:instance=172.18.1.3:9100 Value:0xc0045edeb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.048455608s EvaluationString:[ var='A' labels={instance=172.18.1.3:9100} value=0.2788993880871119 ], [ var='B' labels={instance=172.18.1.3:9100} value=0.2788993880871119 ], [ var='C' labels={instance=172.18.1.3:9100} value=0 ], [ var='D' labels={instance=172.18.1.3:9100} value=0.002788993880871119 ]}]" duration=9.114407ms + logger=ngalert.scheduler user=707420 slug=pangealab version=2 fingerprint=e54c35d6d5dd6ead attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.048777155Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.048529801s EvaluationString:}]" duration=19.295519ms + level=debug ts=2024-05-29T13:44:15.048816846Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.165.48.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=washington-s499, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.048733178Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.048749223Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.048658709Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.048619379Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.048517658Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.048479277Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:15.048422944Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.048410422Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.647282ms + level=debug ts=2024-05-29T13:44:15.048348692Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.048244575Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=infra, device=/dev/sda1, fstype=ext4, instance=infra.home.lan, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.048235414Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=753403 slug=romich instance="agent_hostname=infra, device=/dev/sda1, fstype=ext4, instance=infra.home.lan, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:15.048218134Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.048187951Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.048143358Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.165.48.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=washington-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.048085646Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=753403 slug=romich t=2024-05-29T13:44:15.047892057Z level=debug msg="State manager processing evaluation results" resultCount=28 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.048040747Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=info ts=2024-05-29T13:44:15.047972847Z caller=remote_alert_sender.go:94 user=806502 slug=sxhclgitpd host=sxhclgitpd-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.16.32.131:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bee35b73-bacf-4a2e-84f4-fcbe82b58fc6 alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.165.48.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=washington-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.04791427Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Washington, cluster=Washington, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.165.48.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=washington-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.047902417Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.04780998Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.047597787Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.047694339Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.047426955Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.047225309Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.04729174Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:15.047289457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:15.047283841Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:15.047252319Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:15.04723926Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=146728 slug=dgc version=1 fingerprint=5589992a3ec41553 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.047117617Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[C0:{Var:C Labels: Value:} C1:{Var:C Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.046733583s EvaluationString:[ var='C0' metric='NoData' labels={} value=null ], [ var='C1' metric='NoData' labels={} value=null ]}]" duration=89.090535ms + level=debug ts=2024-05-29T13:44:15.046943141Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Warsaw, cluster=Warsaw, country=Poland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.59.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=warsaw-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.046822339Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.046780998Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Warsaw, cluster=Warsaw, country=Poland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.59.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=warsaw-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.046810792Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=707420 slug=pangealab t=2024-05-29T13:44:15.046643809Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=903809 slug=geodis t=2024-05-29T13:44:15.046566471Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=807171 slug=unstarnp instance="datasource_uid=grafanacloud-prom, ref_id=A,D" t=2024-05-29T13:44:15.046462842Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vilnius, cluster=Vilnius, country=Lithuania, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=194.32.122.16, is_collocated=false, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vilnius-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.046524023Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=807171 slug=unstarnp t=2024-05-29T13:44:15.046402431Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.046490557Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=807171 slug=unstarnp version=11 fingerprint=f613d29471aafdcc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.046318601Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A,D State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.045985921s EvaluationString:}]" duration=12.46853ms + logger=ngalert.state.manager user=197492 slug=nbi instance= t=2024-05-29T13:44:15.046402669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=197492 slug=nbi instance= t=2024-05-29T13:44:15.046387964Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=197492 slug=nbi t=2024-05-29T13:44:15.046355592Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Node Status" + level=debug ts=2024-05-29T13:44:15.046277102Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.046317256Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vientiane, cluster=vientiane, country=Laos, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=188.240.11.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vientiane-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.046144685Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.0461111Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:15.045811493Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=235643 slug=livecirsa t=2024-05-29T13:44:15.045836557Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=235643 slug=livecirsa t=2024-05-29T13:44:15.045757256Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=191103 slug=amazonadmin version=75 fingerprint=878c681c890b1c85 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.045738403Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.045479783s EvaluationString:}]" duration=606.636963ms + logger=ngalert.scheduler user=235643 slug=livecirsa version=1 fingerprint=7acf4c6bc07cb259 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.045682055Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.047382727s EvaluationString:}]" duration=12.098931ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vienna, cluster=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=37.19.223.31, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=vienna-s412, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.045681725Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.045645456Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.045397948Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.045395467Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.045387815Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.045380165Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vienna, cluster=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=37.19.223.199, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=vienna-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.045319833Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.045181908Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.045053986Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vienna, cluster=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=37.19.223.199, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vienna-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.04507922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.045034989Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.044928631Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vienna, cluster=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=37.19.223.111, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=vienna-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.0448863Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.04485542Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:15.044776498Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=39.441983ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vienna, cluster=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=37.19.223.111, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vienna-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.044733477Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.04469861Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.044724559Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.044591633Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.044559141Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.04446325Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.044453899Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.04426993Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.04425658Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.044205542Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vienna, cluster=Vienna, country=Austria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=37.19.223.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vienna-s411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.044308262Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.044242472Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.044168317Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vancouver, cluster=Vancouver-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=172.98.89.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=vancouver-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.0441254Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.044042338Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.043995944Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=935619 slug=wanictf24 t=2024-05-29T13:44:15.043877267Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=31.018457ms
+ level=info ts=2024-05-29T13:44:15.043768607Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fd571e03-f846-4c6d-b04e-eec72b415f33 alerts=1
+ logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:15.043595043Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:15.043580635Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.043603839Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vancouver, cluster=Vancouver, country=Canada, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.153.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vancouver-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.043614335Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vancouver, cluster=Vancouver, country=Canada, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.153.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vancouver-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.043600499Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Valletta, cluster=Valletta, country=Malta, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.230.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=valletta-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.043442033Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.043265525Z caller=remote_instance_store.go:51 user=527202 slug=lnrsusinsurancedev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Valletta, cluster=Valletta, country=Malta, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.230.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=valletta-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.04329145Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Valletta, cluster=Valletta, country=Malta, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.230.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=valletta-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.043277743Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:15.043077843Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-sar-investigation-db, env=dev" t=2024-05-29T13:44:15.038237175Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-sar-investigation-db, env=dev" t=2024-05-29T13:44:15.038222946Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-risk-defense-platform-db, env=dev" t=2024-05-29T13:44:15.037910791Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Valencia, cluster=Valencia, country=Spain, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=196.245.54.30, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=valencia-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.043096843Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-periodic-reviews-db, env=dev" t=2024-05-29T13:44:15.03722861Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-idverse-enterprise-db, env=dev" t=2024-05-29T13:44:15.036499303Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="location=prod-us-east-2" t=2024-05-29T13:44:15.035662628Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=774110 slug=nvidiapoc t=2024-05-29T13:44:15.042957883Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=774110 slug=nvidiapoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.042941944Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:15.034990129Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:15.03497006Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Valencia, cluster=Valencia, country=Spain, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=196.245.54.30, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=valencia-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.042892538Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-frontend-db, env=dev" t=2024-05-29T13:44:15.031560416Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:15.040317737Z caller=grafana.go:247 user=391538 slug=risknarrative msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=noData&state=error" groups=3 alerts=0
+ level=debug ts=2024-05-29T13:44:15.034811232Z caller=remote_instance_store.go:51 user=328755 slug=infogrideu msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=337951 slug=pawapay version=8 fingerprint=cad0531b7f33448c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.031780267Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.946209099s EvaluationString:}]" duration=99.154341ms
+ logger=ngalert.state.manager user=328755 slug=infogrideu instance="DBClusterIdentifier=alert-service-live, Role=WRITER" t=2024-05-29T13:44:15.034715715Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=328755 slug=infogrideu t=2024-05-29T13:44:15.034567483Z level=debug msg="State manager processing evaluation results" resultCount=2
+ logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:15.03448247Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.034572585Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=774110 slug=nvidiapoc t=2024-05-29T13:44:15.04264184Z level=debug msg="State manager processing evaluation results" resultCount=1
+ Error parsing panelUID for alert annotationruleID1874dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.042544524Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=37.744684ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Valencia, cluster=Valencia, country=Spain, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=196.245.54.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=valencia-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.042513419Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=532655 slug=chathamdirectdev t=2024-05-29T13:44:15.04247335Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.296805ms
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.042268118Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=390300 slug=astrachain t=2024-05-29T13:44:15.042190324Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.291937ms
+ logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000265, ref_id=A" t=2024-05-29T13:44:15.042230633Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vaduz, cluster=Vaduz, country=Liechtenstein, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.122.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vaduz-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.042160571Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.041999468Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.041992857Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:15.041984806Z caller=remote_image_capturer.go:61 user=87052 slug=polystream rule_org_id=1 rule_uid=Ej9Py7_7z dashboard=A8VGUjvZk panel=30 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.041945071Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.04188197Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.041836627Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Vaduz, cluster=Vaduz, country=Liechtenstein, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.122.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=vaduz-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.041783841Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.04171873Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Ulaanbaatar, cluster=Ulaanbaatar, country=Mongolia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.227.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=ulaanbaatar-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.041581691Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.041450791Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Ulaanbaatar, cluster=Ulaanbaatar, country=Mongolia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.227.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=ulaanbaatar-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.041405972Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=114492 slug=railsbank version=2 fingerprint=a3cad0994d5409f2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.041226733Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=prod-play-cs-kyc-api-railsbank-2021112911320538340000000d State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=prod-play-cs-kyc-api-railsbank-2021112911320538340000000d Value:0xc02c134788} C:{Var:C Labels:DBInstanceIdentifier=prod-play-cs-kyc-api-railsbank-2021112911320538340000000d Value:0xc02c134780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.040938703s EvaluationString:[ var='B' labels={DBInstanceIdentifier=prod-play-cs-kyc-api-railsbank-2021112911320538340000000d} value=0 ], [ var='C' labels={DBInstanceIdentifier=prod-play-cs-kyc-api-railsbank-2021112911320538340000000d} value=0 ]}]" duration=126.457374ms
+ logger=ngalert.state.manager user=87052 slug=polystream instance= t=2024-05-29T13:44:15.041302472Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=87052 slug=polystream instance= t=2024-05-29T13:44:15.041290814Z level=debug msg="Setting next state" handler=resultError
+ level=debug ts=2024-05-29T13:44:15.04121037Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.041031176Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.041177995Z caller=remote_instance_store.go:51 user=452115 slug=ybmetrics msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.041159157Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.041092479Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.041137913Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=657220 slug=b2u t=2024-05-29T13:44:15.041050624Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.00211ms
+ logger=ngalert.scheduler user=438185 slug=nodeinfra version=14 fingerprint=26ee38fb1fb2af38 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.040981834Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=up, chain=ETH, cloud=GCP, deployment=production, instance=202.8.9.132:5054, job=prod-ETH-mainnet, network=mainnet, node_name=prod_ETH_mainnet_validator_jp_1, purpose=obol, region=japan, servicetype=validator State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=up, chain=ETH, cloud=GCP, deployment=production, instance=202.8.9.132:5054, job=prod-ETH-mainnet, network=mainnet, node_name=prod_ETH_mainnet_validator_jp_1, purpose=obol, region=japan, servicetype=validator Value:0xc061f56cf8} B:{Var:B Labels:__name__=up, chain=ETH, cloud=GCP, deployment=production, instance=202.8.9.132:5054, job=prod-ETH-mainnet, network=mainnet, node_name=prod_ETH_mainnet_validator_jp_1, purpose=obol, region=japan, servicetype=validator Value:0xc061f56f00} C:{Var:C Labels:__name__=up, chain=ETH, cloud=GCP, deployment=production, instance=202.8.9.132:5054, job=prod-ETH-mainnet, network=mainnet, node_name=prod_ETH_mainnet_validator_jp_1, purpose=obol, region=japan, servicetype=validator Value:0xc061f570e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.040593267s EvaluationString:[ var='A' labels={__name__=up, chain=ETH, cloud=GCP, deployment=production, instance=202.8.9.132:5054, job=prod-ETH-mainnet, network=mainnet, node_name=prod_ETH_mainnet_validator_jp_1, purpose=obol, region=japan, servicetype=validator} value=1 ], [ var='B' labels={__name__=up, chain=ETH, cloud=GCP, deployment=production, instance=202.8.9.132:5054, job=prod-ETH-mainnet, network=mainnet, node_name=prod_ETH_mainnet_validator_jp_1, purpose=obol, region=japan, servicetype=validator} value=1 ], [ var='C' labels={__name__=up, chain=ETH, cloud=GCP, deployment=production, instance=202.8.9.132:5054, job=prod-ETH-mainnet, network=mainnet, node_name=prod_ETH_mainnet_validator_jp_1, purpose=obol, region=japan, servicetype=validator} value=0 ]}]" duration=41.190322ms
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=ee9e2f458c96e752 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.040988194Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.040743585s EvaluationString:}]" duration=183.973642ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.04095037Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=542352 slug=scytaleai instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.040848219Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:15.040834218Z caller=remote_instance_store.go:51 user=87780 slug=zencloudandhosting msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=542352 slug=scytaleai instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.040831139Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=542352 slug=scytaleai t=2024-05-29T13:44:15.040763844Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Travnik, cluster=Travnik, country=Bosnia and Herzegovina, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=98.159.36.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=travnik-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.04078265Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=542352 slug=scytaleai version=6 fingerprint=e35d5a90bad0a55f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.040672004Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.040341278s EvaluationString:}]" duration=10.333372ms
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:15.040766989Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:15.040756058Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=87780 slug=zencloudandhosting version=1 fingerprint=6125a2abbb3ce4a0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.040637206Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.040384639s EvaluationString:}]" duration=127.499175ms
+ logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.040553292Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.040394303Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.040424608Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.040128995Z caller=remote_instance_store.go:51 user=423441 slug=outgoinc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.040048373Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.039832509Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Toronto, cluster=Toronto-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=66.115.142.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=toronto-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.039903651Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.039825956Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.039782855Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=806502 slug=sxhclgitpd instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.039643456Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.039618811Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=806502 slug=sxhclgitpd version=4 fingerprint=84e78cb8db7bfa6b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.039525846Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.039167073s EvaluationString:}]" duration=7.495852ms
+ logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:15.039595938Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.039544937Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.03927979Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=427067 slug=davitino t=2024-05-29T13:44:15.03924525Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=427067 slug=davitino instance= t=2024-05-29T13:44:15.039219486Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.039255222Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.039227874Z caller=remote_image_capturer.go:33 user=427067 slug=davitino rule_org_id=1 rule_uid=LcCiPP44k msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=error ts=2024-05-29T13:44:15.039118524Z caller=remote_rule_evaluator.go:110 user=427067 slug=davitino msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ logger=ngalert.scheduler user=427067 slug=davitino version=1 fingerprint=c474d143b0131202 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.039152317Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=2.494876ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tokyo, cluster=Tokyo, country=Japan, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.35.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=tokyo-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.039086738Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.039055271Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.038984114Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.039046143Z caller=remote_instance_store.go:51 user=173374 slug=felmo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:15.039007751Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.03895235Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=173374 slug=felmo t=2024-05-29T13:44:15.038945485Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.038504624Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.038469464Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tokyo, cluster=Tokyo, country=Japan, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.20.205, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=tokyo-s450, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.038346182Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=620339 slug=energostack instance="datasource_uid=bdjm40q3tf1tse, ref_id=A" t=2024-05-29T13:44:15.038273935Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager user=620339 slug=energostack instance="datasource_uid=bdjm40q3tf1tse, ref_id=A" t=2024-05-29T13:44:15.038253375Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=620339 slug=energostack version=1 fingerprint=b0cecb5eef1af948 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.038167383Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bdjm40q3tf1tse, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.037883486s EvaluationString:}]" duration=30.725723ms
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:15.038189822Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.79117ms
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="ClientId=638305841129, DomainName=pre-euc1-releng-logging" t=2024-05-29T13:44:15.038154132Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tokyo, cluster=Tokyo, country=Japan, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.20.205, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=tokyo-s450, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.038126362Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tirana, cluster=Tirana, country=Albania, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=31.171.155.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=tirana-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.038005851Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.037958047Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.037867808Z level=debug msg="Changing state" previous_state=Pending next_state=Error previous_ends_at=2024-05-29T13:46:00Z next_ends_at=2024-05-29T13:48:00Z
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tirana, cluster=Tirana, country=Albania, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=31.171.155.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=tirana-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.037856871Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.037826168Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.037618925Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tirana, cluster=Tirana, country=Albania, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=31.171.152.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=tirana-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.037271515Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.037238957Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tirana, cluster=Tirana, country=Albania, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=31.171.152.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=tirana-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.037039113Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.036933651Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.036956886Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.036887753Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.036877057Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.036838956Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tbilisi, cluster=Tbilisi, country=Georgia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.236.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=tbilisi-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.036722243Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.036371925Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tallinn, cluster=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=165.231.182.142, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=tallinn-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.03599058Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tallinn, cluster=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=165.231.182.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=tallinn-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.035773855Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Tallinn, cluster=Tallinn, country=Estonia, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=165.231.182.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=tallinn-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.035763225Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID2547dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.035745908Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=185.196886ms
+ level=debug ts=2024-05-29T13:44:15.035732639Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.035733911Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww" t=2024-05-29T13:44:15.035648973Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:15.035605794Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.145.122:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=kYUesRE4z alerts=1
+ level=info ts=2024-05-29T13:44:15.035595876Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.118.165:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=kYUesRE4z alerts=1
+ level=debug ts=2024-05-29T13:44:15.035510438Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=Ejs1P5xVk, ref_id=A" t=2024-05-29T13:44:15.03531543Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.035409505Z caller=remote_instance_store.go:51 user=290313 slug=replit msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.035133952Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:15.03533163Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw" t=2024-05-29T13:44:15.035350715Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:15.035230022Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The total number of rejections happened on the primary shards due to indexing pressure since the last OpenSearch Service process startup = {{ $values.B.Value }': error parsing template __alert_Prod - OpenSearch - PrimaryWriteRejected - prod-workmotion-auditlog: template: __alert_Prod - OpenSearch - PrimaryWriteRejected - prod-workmotion-auditlog:1: unexpected \"}\" in operand"
+ level=debug ts=2024-05-29T13:44:15.035120944Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Taipei, cluster=Taipei, country=Taiwan, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=173.244.49.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=taipei-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.035150681Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=310637 slug=notino instance="container=newsletter" t=2024-05-29T13:44:15.034847754Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=310637 slug=notino instance="container=newsletter" t=2024-05-29T13:44:15.034835862Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=310637 slug=notino instance="container=navigationapi" t=2024-05-29T13:44:15.03473536Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=rating-car, pod=rating-car-85b58db7ff-wggwj" t=2024-05-29T13:44:15.034638771Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=rating-car, pod=rating-car-85b58db7ff-wg6ck" t=2024-05-29T13:44:15.034576105Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.034506214Z level=debug msg="State manager processing evaluation results" resultCount=2
+ logger=ngalert.scheduler user=698963 slug=lemonade version=2 fingerprint=9f0afcae923d1a88 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.034369539Z level=debug msg="Alert rule evaluated" results="[{Instance:app=rating-car, pod=rating-car-85b58db7ff-wg6ck State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=rating-car, pod=rating-car-85b58db7ff-wg6ck Value:0xc024df6a68} THRESHOLD:{Var:THRESHOLD Labels:app=rating-car, pod=rating-car-85b58db7ff-wg6ck Value:0xc024df6aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.033753481s EvaluationString:[ var='QUERY' labels={app=rating-car, pod=rating-car-85b58db7ff-wg6ck} value=0 ], [ var='THRESHOLD' labels={app=rating-car, pod=rating-car-85b58db7ff-wg6ck} value=0 ]} {Instance:app=rating-car, pod=rating-car-85b58db7ff-wggwj State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=rating-car, pod=rating-car-85b58db7ff-wggwj Value:0xc024df6ae8} THRESHOLD:{Var:THRESHOLD Labels:app=rating-car, pod=rating-car-85b58db7ff-wggwj Value:0xc024df6b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.033782669s EvaluationString:[ var='QUERY' labels={app=rating-car, pod=rating-car-85b58db7ff-wggwj} value=0 ], [ var='THRESHOLD' labels={app=rating-car, pod=rating-car-85b58db7ff-wggwj} value=0 ]}]" duration=45.647278ms
+ logger=ngalert.state.manager user=310637 slug=notino instance="container=navigation-readmodel" t=2024-05-29T13:44:15.034496019Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sydney, cluster=Sydney, country=Australia, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.210.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=sydney-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.03447565Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.034333763Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.034321742Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=47339fd3170b647a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.034218844Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.033986442s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=310.293483ms
+ level=debug ts=2024-05-29T13:44:15.03416485Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=310637 slug=notino instance="container=main-menu" t=2024-05-29T13:44:15.034092992Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.034125157Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.034047821Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.033978381Z caller=remote_instance_store.go:51 user=524410 slug=syso msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.033962303Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.033873311Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sydney, cluster=Sydney, country=Australia, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.81.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=sydney-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.033833321Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.033743076Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.033745792Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.03364303Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=310637 slug=notino version=78 fingerprint=b5ce31a92893e747 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.033357933Z level=debug msg="Alert rule evaluated" results="[{Instance:container=databreakersapi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=databreakersapi Value:0xc070127b20} B:{Var:B Labels:container=databreakersapi Value:0xc070127b28} C:{Var:C Labels:container=databreakersapi Value:0xc070127b70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032712398s EvaluationString:[ var='A' labels={container=databreakersapi} value=-0.0215129993047522 ], [ var='B' labels={container=databreakersapi} value=-0.0215129993047522 ], [ var='C' labels={container=databreakersapi} value=0 ]} {Instance:container=main-menu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=main-menu Value:0xc070127bc8} B:{Var:B Labels:container=main-menu Value:0xc070127c10} C:{Var:C Labels:container=main-menu Value:0xc070127bc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032729478s EvaluationString:[ var='A' labels={container=main-menu} value=-0.282946044770284 ], [ var='B' labels={container=main-menu} value=-0.282946044770284 ], [ var='C' labels={container=main-menu} value=0 ]} {Instance:container=navigation-fragment State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=navigation-fragment Value:0xc070127c48} B:{Var:B Labels:container=navigation-fragment Value:0xc070127c70} C:{Var:C Labels:container=navigation-fragment Value:0xc070127c40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032737414s EvaluationString:[ var='A' labels={container=navigation-fragment} value=-0.5467175371484418 ], [ var='B' labels={container=navigation-fragment} value=-0.5467175371484418 ], [ var='C' labels={container=navigation-fragment} value=0 ]} {Instance:container=navigation-readmodel State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=navigation-readmodel Value:0xc070127cd0} B:{Var:B Labels:container=navigation-readmodel Value:0xc070127ca0} C:{Var:C Labels:container=navigation-readmodel Value:0xc070127ca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032746691s EvaluationString:[ var='A' labels={container=navigation-readmodel} value=-1.091816645278731 ], [ var='B' labels={container=navigation-readmodel} value=-1.091816645278731 ], [ var='C' labels={container=navigation-readmodel} value=0 ]} {Instance:container=navigation-readmodel-ec State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=navigation-readmodel-ec Value:0xc070127d00} B:{Var:B Labels:container=navigation-readmodel-ec Value:0xc070127d08} C:{Var:C Labels:container=navigation-readmodel-ec Value:0xc070127d30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032755007s EvaluationString:[ var='A' labels={container=navigation-readmodel-ec} value=-0.6281828788038569 ], [ var='B' labels={container=navigation-readmodel-ec} value=-0.6281828788038569 ], [ var='C' labels={container=navigation-readmodel-ec} value=0 ]} {Instance:container=navigationapi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=navigationapi Value:0xc070127d88} B:{Var:B Labels:container=navigationapi Value:0xc070127dd0} C:{Var:C Labels:container=navigationapi Value:0xc070127d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032763848s EvaluationString:[ var='A' labels={container=navigationapi} value=-0.24768694419759385 ], [ var='B' labels={container=navigationapi} value=-0.24768694419759385 ], [ var='C' labels={container=navigationapi} value=0 ]} {Instance:container=newsletter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=newsletter Value:0xc070127e20} B:{Var:B Labels:container=newsletter Value:0xc070127e28} C:{Var:C Labels:container=newsletter Value:0xc070127e70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032770698s EvaluationString:[ var='A' labels={container=newsletter} value=-0.15720511780279856 ], [ var='B' labels={container=newsletter} value=-0.15720511780279856 ], [ var='C' labels={container=newsletter} value=0 ]} {Instance:container=web-export State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=web-export Value:0xc070127ec0} B:{Var:B Labels:container=web-export Value:0xc070127ec8} C:{Var:C Labels:container=web-export Value:0xc070127f10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.032776826s EvaluationString:[ var='A' labels={container=web-export} value=-0.13115849331024465 ], [ var='B' labels={container=web-export} value=-0.13115849331024465 ], [ var='C' labels={container=web-export} value=0 ]}]" duration=49.953814ms
+ logger=ngalert.state.manager.persist user=692010 slug=mercariusprod t=2024-05-29T13:44:15.033434759Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.033320499Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.033251034Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=92.204.188.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.033320645Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=92.204.188.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.033103223Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:15.033039255Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.674094ms
+ logger=ngalert.state.manager user=328755 slug=infogrideu instance="ServiceName=mlflow" t=2024-05-29T13:44:15.032956307Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=92.204.175.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.032914313Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.032798901Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.032723836Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.032765553Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.032714477Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.032696381Z caller=remote_instance_store.go:51 user=935619 slug=wanictf24 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.031188129Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.031128328Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=92.204.174.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.032550712Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.032346132Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.032268Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.032024414Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=92.204.174.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.032327908Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.032216562Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.03212609Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:15.032116169Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.527648ms
+ level=debug ts=2024-05-29T13:44:15.031935533Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.032098299Z caller=remote_instance_store.go:51 user=230713 slug=flocksafety msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=78.138.99.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.032074829Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=78.138.99.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.032034699Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.031978611Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.031790607Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=230713 slug=flocksafety t=2024-05-29T13:44:15.031958594Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=230713 slug=flocksafety version=25 fingerprint=65ddce1eea89237f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.03187323Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.031570083s EvaluationString:}]" duration=80.687316ms
+ level=debug ts=2024-05-29T13:44:15.031895204Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:15.031877304Z level=debug msg="Saving alert states" count=5 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.031828802Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:15.031821771Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:15.031376097Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.031017806Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=78.138.99.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.031763521Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=543654 slug=jobcloudprogrammaticprod version=1 fingerprint=d5d9d1589bc9b00f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.031388684Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.9514177s EvaluationString:}]" duration=98.705595ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=78.138.99.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.031740784Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:15.031685796Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=185.136.160.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.031512703Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.031449805Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.03143157Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=185.136.160.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.031492242Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:15.031339472Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=97.429361ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=185.136.160.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.031283173Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.031006155Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=281099 slug=nureva t=2024-05-29T13:44:15.031047675Z level=debug msg="Skip rule evaluation because it is paused"
+ level=debug ts=2024-05-29T13:44:15.030916084Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(apex.Middle_East.players.ps4.mh448980.serverstats) Query" t=2024-05-29T13:44:15.03096351Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.030841477Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.030868433Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.8.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.030865657Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=1d24d0174f0106a6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.03080278Z level=debug msg="Alert rule evaluated"
results="[{Instance:name=keepLastValue(apex.Middle_East.players.ps4.mh448980.serverstats) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc06cce9138} Threshold:{Var:Threshold Labels: Value:0xc06cce9170} compare:{Var:compare Labels:name=keepLastValue(apex.Middle_East.players.ps4.mh448980.serverstats) Query Value:0xc06cce9190} sum:{Var:sum Labels:name=keepLastValue(apex.Middle_East.players.ps4.mh448980.serverstats) Query Value:0xc06cce91a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.030635215s EvaluationString:[ var='Breaches' labels={} value=360 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={name=keepLastValue(apex.Middle_East.players.ps4.mh448980.serverstats) Query} value=0 ], [ var='sum' labels={name=keepLastValue(apex.Middle_East.players.ps4.mh448980.serverstats) Query} value=0 ]}]" duration=17.011619ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.8.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.03069171Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.030554572Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:15.030556569Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.945286ms + level=debug ts=2024-05-29T13:44:15.03057056Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.030331832Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.030261981Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.12.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.030285444Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.030248964Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=265756 slug=vowfood instance= t=2024-05-29T13:44:15.03008045Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=265756 slug=vowfood t=2024-05-29T13:44:15.030045356Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.12.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.030069548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=265756 slug=vowfood version=6 fingerprint=540efe29cdeec6d5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.029993014Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.029701452s EvaluationString:}]" duration=35.620916ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.12.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.03003563Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.030014306Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.02980373Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.11.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.029804436Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.11.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.029677525Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.029652817Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.10.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=strasbourg-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.029516187Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Strasbourg, cluster=Strasbourg, country=France, datacenter=Velia, environment=production, instance=10.0.0.203:9998, ip=151.106.10.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=strasbourg-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.029355413Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.029304167Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=708873 slug=soultv t=2024-05-29T13:44:15.029212065Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.0886ms + level=debug ts=2024-05-29T13:44:15.029038144Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.028789673Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Stockholm, cluster=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=46.246.3.141, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=stockholm-s415, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.028890791Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.028842789Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.028852338Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.028757991Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.028829878Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.028723109Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:15.028668678Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.210188ms + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:15.028580992Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.848317ms + 
level=debug ts=2024-05-29T13:44:15.028528859Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Stockholm, cluster=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=188.126.79.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=stockholm-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.028281654Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Stockholm, cluster=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=188.126.79.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=stockholm-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.028263317Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.028035479Z caller=remote_instance_store.go:51 user=228733 slug=csmoney msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=657220 slug=b2u instance="__name__=inv_1_operating_state, instance=localhost:8000, job=python_app, metric=operating_state, site=Cuyama, unit=units" t=2024-05-29T13:44:15.028032913Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=228733 slug=csmoney instance= t=2024-05-29T13:44:15.027930192Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Stockholm, cluster=Stockholm, country=Sweden, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=188.126.79.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=stockholm-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.028037667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=657220 slug=b2u version=3 fingerprint=ade246edbc0c7549 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.027853002Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=inv_1_operating_state, instance=localhost:8000, job=python_app, metric=operating_state, site=Cuyama, unit=units State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=inv_1_operating_state, instance=localhost:8000, job=python_app, metric=operating_state, site=Cuyama, unit=units Value:0xc006561cc0} B:{Var:B Labels:__name__=inv_1_operating_state, instance=localhost:8000, job=python_app, metric=operating_state, site=Cuyama, unit=units Value:0xc006561db0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.016886368s EvaluationString:[ var='A' labels={__name__=inv_1_operating_state, instance=localhost:8000, job=python_app, metric=operating_state, site=Cuyama, unit=units} value=3526 ], [ var='B' labels={__name__=inv_1_operating_state, instance=localhost:8000, job=python_app, metric=operating_state, site=Cuyama, unit=units} value=0 ]}]" duration=14.486903ms + level=debug ts=2024-05-29T13:44:15.027821923Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sofia, cluster=Sofia, country=Bulgaria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.55.158, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=sofia-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.027805654Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.027772495Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sofia, cluster=Sofia, country=Bulgaria, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.55.158, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=sofia-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.027637243Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Skopje, cluster=Skopje, country=Macedonia, datacenter=Interspace, environment=production, instance=10.0.0.203:9998, ip=185.225.28.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=skopje-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.027442173Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.027411703Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.027267895Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=22398 slug=sunfolding t=2024-05-29T13:44:15.0272771Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Singapore, cluster=Singapore, country=Singapore, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.162.67, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=singapore-s456, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.027032345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.027000066Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:15.026443579Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Shenzhen, cluster=Shenzhen, country=China, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.80.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=shenzhen-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.026399247Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.026150453Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.026073167Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Seoul, cluster=Seoul, country=Republic of Korea, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.227.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=seoul-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.02600476Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Seoul, cluster=Seoul, country=Republic of Korea, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.227.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=seoul-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.025991165Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.025710694Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.025573081Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.025540372Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.025529841Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.025453447Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.025119357Z caller=remote_instance_store.go:51 user=672418 slug=streamkap msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Seattle, cluster=Seattle, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.49.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=seattle-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.025186934Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.025139746Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:15.025017311Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.37102ms + level=debug ts=2024-05-29T13:44:15.024933863Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.024999361Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.024841676Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sao Paulo, cluster=Sao Paulo, country=Brazil, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.177.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=saopaulo-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.0247984Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.024608711Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sao Paulo, cluster=Sao Paulo, country=Brazil, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.177.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=saopaulo-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.024624731Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.024451615Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.024384307Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.024397375Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:15.024370907Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.024209127Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sao Paulo, cluster=Sao Paulo, country=Brazil, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.177.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=saopaulo-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.024219161Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.023971771Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager.persist user=166137 slug=teletracking t=2024-05-29T13:44:15.023828412Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=109.168698ms + level=debug ts=2024-05-29T13:44:15.023838765Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sao Paulo, cluster=Sao Paulo, country=Brazil, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.177.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=saopaulo-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.02387422Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.023845793Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.023777481Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.023741468Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.023680276Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sao Paulo, cluster=Sao Paulo, country=Brazil, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.177.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=saopaulo-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.023738876Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.023529889Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.02362674Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.023503368Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.023362776Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sao Paulo, cluster=Sao Paulo, country=Brazil, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.177.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=saopaulo-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.023355388Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.02326249Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.023246324Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.023168864Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.023050584Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.022776626Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.022671527Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=467258 slug=neonprod instance="neon_region=us-east-2" t=2024-05-29T13:44:15.022503991Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=467258 slug=neonprod instance="neon_region=eu-west-1" t=2024-05-29T13:44:15.022421312Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Sao Paulo, cluster=Sao Paulo, country=Brazil, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.241.177.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=saopaulo-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.022350677Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:15.022227151Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.022205501Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=467258 slug=neonprod version=35 fingerprint=5b9e435e0730ca60 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.022108034Z level=debug msg="Alert rule evaluated" results="[{Instance:neon_region=eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:neon_region=eu-west-1 Value:0xc0355a0a90} B:{Var:B Labels:neon_region=eu-west-1 Value:0xc0355a09f0} C:{Var:C Labels:neon_region=eu-west-1 Value:0xc0355a09f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.02169528s EvaluationString:[ var='A' labels={neon_region=eu-west-1} value=0 ], [ var='B' labels={neon_region=eu-west-1} value=0 ], [ var='C' labels={neon_region=eu-west-1} value=0 ]} {Instance:neon_region=us-east-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:neon_region=us-east-2 Value:0xc0355a0b00} B:{Var:B Labels:neon_region=us-east-2 Value:0xc0355a0b08} C:{Var:C Labels:neon_region=us-east-2 Value:0xc0355a0b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.021712312s EvaluationString:[ var='A' labels={neon_region=us-east-2} value=0 ], [ var='B' labels={neon_region=us-east-2} value=0 ], [ var='C' labels={neon_region=us-east-2} value=0 ]}]" duration=115.25466ms + level=debug ts=2024-05-29T13:44:15.022207702Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:15.022165596Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:15.022140021Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Santo Domingo, cluster=Santo Domingo, country=Dominican Republic, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=188.240.218.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=santodomingo-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.022088362Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.02190914Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.021892958Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.021832479Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.021758074Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:15.021534518Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Santiago, cluster=Santiago, country=Chile, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.11.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=santiago-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.021562001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Santiago, cluster=Santiago, country=Chile, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.11.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=santiago-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.02153531Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.021303949Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.021252771Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.021212794Z caller=remote_instance_store.go:51 user=532655 slug=chathamdirectdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.021178847Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.021121562Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=San Jose, cluster=San Jose, country=Costa Rica, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.10.40, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=sanjose-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.021080194Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=532655 slug=chathamdirectdev version=3 fingerprint=254bd29384d6f155 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.021026559Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.020757447s EvaluationString:}]" duration=2.296407835s + level=debug ts=2024-05-29T13:44:15.020950357Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.020903553Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:15.020855089Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.307384ms + level=debug ts=2024-05-29T13:44:15.02074047Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.020579676Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.020605897Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.020470093Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.020384765Z caller=remote_instance_store.go:51 user=228733 slug=csmoney msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.020462219Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=228733 slug=csmoney instance= t=2024-05-29T13:44:15.020325171Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=San Francisco, cluster=San Francisco, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.252.143, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=sanfrancisco-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.020235675Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:15.020141246Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.020145376Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.020167284Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.019987682Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=390300 slug=astrachain instance="datasource_uid=o1ErKYq7z, ref_id=A" t=2024-05-29T13:44:15.019865661Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:15.019731666Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rome, cluster=Rome, country=Italy, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=87.101.94.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=rome-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.019842793Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.historian backend=loki user=438855 slug=teckresources t=2024-05-29T13:44:15.019750114Z level=debug msg="Done saving alert state history batch" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rome, cluster=Rome, country=Italy, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=87.101.94.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=rome-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.019655756Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.019608426Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=656459 slug=activeport instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:15.019509854Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=656459 slug=activeport t=2024-05-29T13:44:15.019456824Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.019488634Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=656459 slug=activeport version=113 fingerprint=1566bfa25747954c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.019360505Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.018996531s EvaluationString:}]" duration=17.963598ms + level=debug ts=2024-05-29T13:44:15.019362197Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=155740 slug=routific t=2024-05-29T13:44:15.019326608Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:15.019269435Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=491157 slug=prd01wr instance= t=2024-05-29T13:44:15.019262984Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID1858dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=155740 slug=routific version=2 fingerprint=fb2a21a5e99345c2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.019226764Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.018961693s EvaluationString:}]" duration=13.239694ms + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.019206158Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rome, cluster=Rome, country=Italy, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.217.71.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=rome-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.019022687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.018975844Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rome, cluster=Rome, country=Italy, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.217.71.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=rome-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.018815769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rome, cluster=Rome, country=Italy, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.217.71.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=rome-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.018645638Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.018563849Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:15.01817312Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.018136044Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=551737cd2404e3c1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.018035786Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.017092634s EvaluationString:}]" duration=499.897726ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rome, cluster=Rome, country=Italy, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.217.71.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=rome-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.017978601Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.017753959Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Riyadh, cluster=Riyadh, country=Saudi Arabia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.235.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=riyadh-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.017727659Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.017382013Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, 
access_group=premium, brand=CyberGhost, city=Riyadh, cluster=Riyadh, country=Saudi Arabia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.235.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=riyadh-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.017435335Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.017367342Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.01717367Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.017210542Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.017123492Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.016995153Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=231061 slug=teamaround t=2024-05-29T13:44:15.016941341Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=231061 slug=teamaround version=58 fingerprint=1c2b6e03b2f548c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.016876449Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.016580738s EvaluationString:}]" duration=1.086363175s
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Riga, cluster=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=196.196.53.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=riga-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.016865899Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.016816072Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.01675436Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.016621886Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.543616ms
+ level=debug ts=2024-05-29T13:44:15.016542454Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.016531289Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.016540794Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:15.016435983Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Riga, cluster=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=196.196.53.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=riga-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.016260068Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.016089643Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Riga, cluster=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=196.196.53.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=riga-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.015873324Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.01557666Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.015506303Z caller=remote_instance_store.go:51 user=332534 slug=adevintakijiji msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Riga, cluster=Riga, country=Latvia, datacenter=PacketExchange, environment=production, instance=10.0.0.203:9998, ip=196.196.53.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=riga-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.015535426Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.015404917Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.015407892Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.01536481Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.015312015Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=332534 slug=adevintakijiji version=22 fingerprint=bf97a60d98c7aa99 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.015226116Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=sumSeries(event-processor.ca-kijiji-production-up0f.*.ca.kijiji.move.MoveMetadataProcessor.move.metadata.batch.errors.m1_rate) error State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:aggregatedBy=sum, name=sumSeries(event-processor.ca-kijiji-production-up0f.*.ca.kijiji.move.MoveMetadataProcessor.move.metadata.batch.errors.m1_rate) error Value:0xc0701277a0} B:{Var:B Labels:aggregatedBy=sum, name=sumSeries(event-processor.ca-kijiji-production-up0f.*.ca.kijiji.move.MoveMetadataProcessor.move.metadata.batch.errors.m1_rate) error Value:0xc0701277e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.014512251s EvaluationString:[ var='A' labels={aggregatedBy=sum, name=sumSeries(event-processor.ca-kijiji-production-up0f.*.ca.kijiji.move.MoveMetadataProcessor.move.metadata.batch.errors.m1_rate) error} value=0 ], [ var='B' labels={aggregatedBy=sum, name=sumSeries(event-processor.ca-kijiji-production-up0f.*.ca.kijiji.move.MoveMetadataProcessor.move.metadata.batch.errors.m1_rate) error} value=0 ]}]" duration=46.508021ms
+ level=debug ts=2024-05-29T13:44:15.015255964Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.015103855Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.015191103Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=708873 slug=soultv t=2024-05-29T13:44:15.015118486Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:15.015143641Z level=debug msg="Saving alert states" count=24 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.01505489Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.41567ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.015032279Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=storage-04, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.13.142:9090, job=prometheus-k8s, k8s_cluster=storage-04, kubernetes_cluster=storage-04, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=storage-04-0, service=prometheus-k8s" t=2024-05-29T13:44:15.015001691Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=storage-04, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.13.142:9090, job=prometheus-k8s, k8s_cluster=storage-04, kubernetes_cluster=storage-04, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=storage-04-0, service=prometheus-k8s" t=2024-05-29T13:44:15.014989284Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.014913744Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=sksinfra-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-skysatinfra-prod, instance=172.24.9.144:9090, job=prometheus-k8s, k8s_cluster=sksinfra-01, kubernetes_cluster=sksinfra-01, namespace=monitoring, pod=prometheus-k8s-1, prometheus=monitoring/k8s, prometheus_shard=sksinfra-01-0, service=prometheus-k8s" t=2024-05-29T13:44:15.014898511Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:15.014803789Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.543057ms
+ level=debug ts=2024-05-29T13:44:15.014755085Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Reykjavik, cluster=Reykjavik, country=Iceland, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=45.133.193.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=reykjavik-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.014816707Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=sksinfra-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-skysatinfra-prod, instance=172.24.6.135:9090, job=prometheus-k8s, k8s_cluster=sksinfra-01, kubernetes_cluster=sksinfra-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=sksinfra-01-0, service=prometheus-k8s" t=2024-05-29T13:44:15.014791465Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=shared-03, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.70.131:9090, job=prometheus-k8s, k8s_cluster=shared-03, kubernetes_cluster=shared-03, namespace=monitoring, pod=prometheus-k8s-1, prometheus=monitoring/k8s, prometheus_shard=shared-03-0, service=prometheus-k8s" t=2024-05-29T13:44:15.014667894Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=shared-03, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.70.131:9090, job=prometheus-k8s, k8s_cluster=shared-03, kubernetes_cluster=shared-03, namespace=monitoring, pod=prometheus-k8s-1, prometheus=monitoring/k8s, prometheus_shard=shared-03-0, service=prometheus-k8s" t=2024-05-29T13:44:15.014653469Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Reykjavik, cluster=Reykjavik, country=Iceland, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=45.133.193.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=reykjavik-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.014633735Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=shared-03, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.103.22:9090, job=prometheus-k8s, k8s_cluster=shared-03, kubernetes_cluster=shared-03, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=shared-03-0, service=prometheus-k8s" t=2024-05-29T13:44:15.014548021Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:15.014324056Z caller=remote_alert_sender.go:94 user=698963 slug=lemonade host=lemonade-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.45.8:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c43c0b55-5d4d-4e68-80f2-adfd886b9f99 alerts=1
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.014260514Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service=prometheus-k8s"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rabat, cluster=Rabat, country=Morocco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.232.14, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=rabat-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.014250005Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.014142164Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service=prometheus-k8s"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.014022293Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.014053148Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service=prometheus-k8s"
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.013945876Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service=prometheus-k8s"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rabat, cluster=Rabat, country=Morocco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.232.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=rabat-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.01388506Z level=debug msg="Keeping state" state=Normal
+ level=error ts=2024-05-29T13:44:15.01386433Z caller=ruler.go:515 msg="failed to load config from grafana instance, skipping instance" user=467639 slug=alexandrasachelarescu3 err="user has the remote ruler not enabled"
+ level=debug ts=2024-05-29T13:44:15.013783279Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rabat, cluster=Rabat, country=Morocco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.232.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=rabat-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.013870823Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-15, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=240.192.48.4:9090, job=prometheus-k8s, k8s_cluster=compute-15, kubernetes_cluster=compute-15, namespace=monitoring, pod=prom-agent-k8s-shard-1-0, prometheus=monitoring/k8s, prometheus_shard=compute-15-0, service=prometheus-k8s" t=2024-05-29T13:44:15.013879688Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.013839023Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.013779642Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.013730189Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-15, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=240.192.3.196:9090, job=prometheus-k8s, k8s_cluster=compute-15, kubernetes_cluster=compute-15, namespace=monitoring, pod=prom-agent-k8s-0, prometheus=monitoring/k8s, prometheus_shard=compute-15-1, service=prometheus-k8s" t=2024-05-29T13:44:15.013760726Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Rabat, cluster=Rabat, country=Morocco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.232.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=rabat-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.013709564Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.013659863Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-13, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=240.0.40.131:9090, job=prometheus-k8s, k8s_cluster=compute-13, kubernetes_cluster=compute-13, namespace=monitoring, pod=prom-agent-k8s-shard-1-0, prometheus=monitoring/k8s, prometheus_shard=compute-13-0, service=prometheus-k8s" t=2024-05-29T13:44:15.013520699Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:15.013472227Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=430961 slug=solifi instance="Instance=--" t=2024-05-29T13:44:15.013458408Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.013462397Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=71ded5b51e6a6b59 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.013369015Z level=debug msg="Alert rule evaluated" results="[{Instance:Instance=-- State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:Instance=-- Value:} C:{Var:C Labels:Instance=-- Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.013063984s EvaluationString:[ var='B' labels={Instance=--} value=null ], [ var='C' labels={Instance=--} value=null ]}]" duration=416.068148ms
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-12, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=240.192.15.132:9090, job=prometheus-k8s, k8s_cluster=compute-12, kubernetes_cluster=compute-12, namespace=monitoring, pod=prom-agent-k8s-0, prometheus=monitoring/k8s, prometheus_shard=compute-12-1, service=prometheus-k8s" t=2024-05-29T13:44:15.013412562Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-11, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=240.0.210.131:9090, job=prometheus-k8s, k8s_cluster=compute-11, kubernetes_cluster=compute-11, namespace=monitoring, pod=prom-agent-k8s-0, prometheus=monitoring/k8s, prometheus_shard=compute-11-0, service=prometheus-k8s" t=2024-05-29T13:44:15.013220847Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-11, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=240.0.127.4:9090, job=prometheus-k8s, k8s_cluster=compute-11, kubernetes_cluster=compute-11, namespace=monitoring, pod=prom-agent-k8s-shard-1-0, prometheus=monitoring/k8s, prometheus_shard=compute-11-1, service=prometheus-k8s" t=2024-05-29T13:44:15.013132092Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Prague, cluster=Prague, country=Czech Republic, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=195.181.161.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=prague-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.013102422Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.013045662Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-10, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=240.0.48.132:9090, job=prometheus-k8s, k8s_cluster=compute-10, kubernetes_cluster=compute-10, namespace=monitoring, pod=prom-agent-k8s-shard-1-0, prometheus=monitoring/k8s, prometheus_shard=compute-10-0, service=prometheus-k8s" t=2024-05-29T13:44:15.013013181Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=935619 slug=wanictf24 t=2024-05-29T13:44:15.01285652Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=935619 slug=wanictf24 instance="service=wanictf24-chal-devserv, task_family=wanictf24-chal-devserv, task_id=8247703c744b4887ac3b409add6e0778, task_revision=2" t=2024-05-29T13:44:15.012817778Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=935619 slug=wanictf24 t=2024-05-29T13:44:15.012776916Z level=debug msg="State manager processing evaluation results" resultCount=2
+ logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:15.01264166Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-03, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.5.132:9090, job=prometheus-k8s, k8s_cluster=compute-03, kubernetes_cluster=compute-03, namespace=monitoring, pod=prometheus-k8s-shard-1-0, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=prometheus-k8s" t=2024-05-29T13:44:15.012755409Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:15.012708712Z caller=remote_instance_store.go:51 user=374423 slug=bitburst msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=926401 slug=algnite instance= t=2024-05-29T13:44:15.012599896Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Prague, cluster=Prague, country=Czech Republic, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.56.251, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=prague-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.012645257Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=compute-03, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.35.196:9090, job=prometheus-k8s, k8s_cluster=compute-03, kubernetes_cluster=compute-03, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=prometheus-k8s" t=2024-05-29T13:44:15.012622514Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.012592564Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service=prometheus-k8s"
+ logger=ngalert.state.manager user=472647 slug=planet instance="cluster=analytics-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-k8s-prod, instance=172.24.21.10:9090, job=prometheus-k8s, k8s_cluster=analytics-01, kubernetes_cluster=analytics-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=analytics-01-0, service=prometheus-k8s" t=2024-05-29T13:44:15.012487141Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Prague, cluster=Prague, country=Czech Republic, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.56.251, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=prague-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.012466196Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.012183349Z level=debug msg="State manager processing evaluation results" resultCount=24
+ level=debug ts=2024-05-29T13:44:15.012215108Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Podgorica, cluster=Podgorica, country=Montenegro, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.229.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=podgorica-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.012280012Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=edic8i1pcbbpce, ref_id=A" t=2024-05-29T13:44:15.012279922Z level=debug msg="Setting next state" handler=resultNoData
+ level=info ts=2024-05-29T13:44:15.012239405Z caller=remote_alert_sender.go:94 user=656459 slug=activeport host=activeport-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.20.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cacf8c17-9f52-4da1-bec4-240e1faa5d30 alerts=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.012232666Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.scheduler user=206107 slug=hydrolix version=4 fingerprint=1f51b8cadd60a524 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.012144784Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=edic8i1pcbbpce, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.01186772s EvaluationString:}]" duration=253.525292ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Podgorica, cluster=Podgorica, country=Montenegro, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.229.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=podgorica-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.012060771Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=806229 slug=simplisafe version=139 fingerprint=4d357fdcffe9e171 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.011949286Z level=debug msg="Alert rule evaluated" results="[{Instance:env=prd-east, region=us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:env=prd-east, region=us-east-1 Value:0xc0684d2c38} B:{Var:B Labels:env=prd-east, region=us-east-1 Value:0xc0684d2c10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.011320377s EvaluationString:[ var='A' labels={env=prd-east, region=us-east-1} value=0 ], [ var='B' labels={env=prd-east, region=us-east-1} value=0 ]} {Instance:env=prd-west2, region=us-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:env=prd-west2, region=us-west-2 Value:0xc0684d2d20} B:{Var:B Labels:env=prd-west2, region=us-west-2 Value:0xc0684d2d60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.011336777s EvaluationString:[ var='A' labels={env=prd-west2, region=us-west-2} value=0 ], [ var='B' labels={env=prd-west2, region=us-west-2} value=0 ]}]" duration=20.864765ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Phoenix, cluster=Phoenix, country=United States, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=184.170.240.221, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=phoenix-s456, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.011707942Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Phoenix, cluster=Phoenix, country=United States, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=184.170.240.221, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=phoenix-s456, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.011683458Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.01149824Z caller=remote_instance_store.go:51 user=237629 slug=ocrolus msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:15.011454179Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=237629 slug=ocrolus instance="container=prometheus-server, namespace=devops, pod=devops-prometheus-server-5697859bd8-j4s86" t=2024-05-29T13:44:15.011436343Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Phnom Penh, cluster=Phnom Penh, country=Cambodia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.215.235.46, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=phnompenh-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.011366869Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.011221423Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.000366ms
+ level=debug ts=2024-05-29T13:44:15.011188303Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.011175416Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:15.011048559Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-workload-weight-update-manager" t=2024-05-29T13:44:15.01110876Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=b8e12137be1ff55d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.010973956Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.010643414s EvaluationString:}]" duration=215.257548ms
+ logger=ngalert.state.manager.persist user=438855 slug=teckresources t=2024-05-29T13:44:15.010944096Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.442827ms
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.010908426Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.010894317Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.010925488Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-workload-process-manager" t=2024-05-29T13:44:15.010895187Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.010873276Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:15.010794197Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:15.010725215Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.scheduler user=114492 slug=railsbank version=2 fingerprint=b380f4414f2466db attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.010730216Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.010492794s EvaluationString:}]" duration=323.125488ms
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.010777024Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-workload-monitor-manager" t=2024-05-29T13:44:15.010743776Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.010687311Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-task-manager" t=2024-05-29T13:44:15.010705251Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=856040 slug=kuady version=16 fingerprint=faaa10e4a57d20bc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.010571132Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Above75:{Var:Above75 Labels: Value:0xc004012898} Condition:{Var:Condition Labels: Value:0xc004012880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.010244641s EvaluationString:[ var='Above75' labels={} value=0 ], [ var='Condition' labels={} value=0 ]}]" duration=384.832947ms
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-scheduler" t=2024-05-29T13:44:15.010658202Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.010625218Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.010571617Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.78, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.010552922Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.78, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.010539144Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=856040 slug=kuady version=10 fingerprint=d85e4d78ff025a6a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.010409089Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Above75:{Var:Above75 Labels: Value:0xc059511f30} Condition:{Var:Condition Labels: Value:0xc059511f38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.010036401s EvaluationString:[ var='Above75' labels={} value=0 ], [ var='Condition' labels={} value=0 ]}]" duration=181.68588ms
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-monitor-manager" t=2024-05-29T13:44:15.010546666Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-g4p-fusion-02-b-manager" t=2024-05-29T13:44:15.010496477Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.010464969Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-g4p-fusion-02-b-lm-manager" t=2024-05-29T13:44:15.01039889Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-g4p-fusion-02-b-lm-manager" t=2024-05-29T13:44:15.010387905Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:15.010315557Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.045358ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.78, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.010360627Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.78, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.010349994Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-g4p-fusion-02-b-2-12-500-manager" t=2024-05-29T13:44:15.010346574Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=542095 slug=intelligencefusion t=2024-05-29T13:44:15.010196792Z level=debug msg="Saving alert states done" count=15 max_state_save_concurrency=1 duration=568.917685ms
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.010175615Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.010113454Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-data-collect-task-manager" t=2024-05-29T13:44:15.010147386Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.010125256Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-data-collect-task-dep-purge-manager" t=2024-05-29T13:44:15.010095889Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.01007121Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=compute-03, service=kube-state-metrics"
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-data-collect-purge-manager" t=2024-05-29T13:44:15.010032644Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-data-collect-archive-manager" t=2024-05-29T13:44:15.009985167Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="__name__=kube_statefulset_status_replicas_available, app_kubernetes_io_component=exporter, app_kubernetes_io_name=kube-state-metrics, app_kubernetes_io_version=2.9.2, cluster=compute-03, cluster_type=gke, component=kube-state-metrics, container=kube-state-metrics, endpoint=http-metrics, gcp_project=planet-k8s-prod, instance=172.24.138.163:8080, job=kube-state-metrics, k8s_cluster=compute-03, kubernetes_cluster=compute-03, kubernetes_namespace=monitoring, kubernetes_node=gke-compute-03-compute-03-standardv3--902b2138-9sfg, kubernetes_pod=kube-state-metrics-cc944bdd5-nxldd, namespace=live, pod=kube-state-metrics-cc944bdd5-nxldd, prometheus=monitoring/k8s, prometheus_shard=compute-03-1, service=kube-state-metrics, statefulset=g4c-fusion-02-data-collect-archive-manager" t=2024-05-29T13:44:15.009970545Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:15.009909284Z level=debug msg="State manager processing evaluation results" resultCount=20
+ level=debug ts=2024-05-29T13:44:15.00966781Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.00965878Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:15.009526674Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:15.009521078Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.009358552Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.009416801Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.009508942Z level=debug msg="Changing state" previous_state=Normal next_state=Error previous_ends_at=2024-05-29T13:43:00Z next_ends_at=2024-05-29T13:48:00Z
+ level=debug ts=2024-05-29T13:44:15.009487962Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:15.009500776Z level=debug msg="Setting next state" handler=resultError
+ level=debug ts=2024-05-29T13:44:15.009482075Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=190917 slug=d1cx t=2024-05-29T13:44:15.00940978Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:15.00940477Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.009386392Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.009184758Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.009135241Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=507549 slug=coindcx t=2024-05-29T13:44:15.008833869Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.682448ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.008872316Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.008836402Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.008636517Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.171, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.008692508Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.008636514Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.171, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.008481321Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.109, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.00827875Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.60.109, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.008063562Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.43.38, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s463, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.007634416Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.007587277Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.007559926Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.43.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.007430225Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris-2, country=France, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=37.19.217.228, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s415, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.007035612Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.006697667Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s443, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.006630525Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.006101262Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.8, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s442, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.006119416Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.006083401Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:15.00597875Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.8, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s442, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.005885405Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:15.005582109Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.005640509Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.005563488Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:15.005563862Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=767797 slug=mgmresorts version=25 fingerprint=9fb6fee816035c77 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.005336603Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.004916498s EvaluationString:}]" duration=657.357235ms
+ level=debug ts=2024-05-29T13:44:15.005363054Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.005147912Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=113.220314ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.005240509Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.005133527Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.004920986Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:15.004981267Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=150145 slug=pleasant instance= t=2024-05-29T13:44:15.004972052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:15.00495596Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.159088ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.004960624Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:15.004797359Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:15.004727732Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="region=us-east-1, service=kube-state-metrics, stage=production" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s439, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.004558338Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.004532044Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=173374 slug=felmo t=2024-05-29T13:44:15.004482992Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s439, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.004404207Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:15.004327537Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.00427035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.004253348Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.004215006Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.31.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.004032959Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.003994265Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.003793618Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.003704454Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.003563388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.003519147Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.003407313Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=152655 slug=orbweaver t=2024-05-29T13:44:15.003448245Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.768498ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.8, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.003399961Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=656459 slug=activeport instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:15.00337853Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=656459 slug=activeport instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:15.003343953Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=656459 slug=activeport t=2024-05-29T13:44:15.003298125Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.003351686Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.003282412Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:15.00323021Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.8, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.003193432Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:15.003210707Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=332534 slug=adevintakijiji t=2024-05-29T13:44:15.00314296Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.912246ms + logger=ngalert.scheduler user=679831 slug=joveostageaws version=12984 fingerprint=e0304b59e64620a2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.003112703Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:5.002814656s EvaluationString:}]" duration=48.272705ms + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:15.002990392Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.745202ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.002960716Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.002842745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=paris-s429, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.002826331Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.002653729Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s428, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.002642267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.002606988Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.002263437Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.002203169Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.00219391Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.002084639Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.001838757Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=314067 slug=itsme t=2024-05-29T13:44:15.001770649Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=72.698741ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.001800185Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.001718929Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.001686876Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:15.001636114Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:15.001494872Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager.persist user=475799 slug=dpdcz t=2024-05-29T13:44:15.001523508Z level=debug msg="Saving alert states done" count=36 max_state_save_concurrency=1 duration=564.56576ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Paris, cluster=Paris, country=France, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.217.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=paris-s425, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:15.001439634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.000984057Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Panama City, cluster=Panama City, country=Panama, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.126.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=panamacity-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.000859431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Oslo, cluster=Oslo, country=Norway, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=82.102.27.90, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=oslo-s408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:15.000660016Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.000631042Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:15.000456933Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:15.000289544Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:15.000452624Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=316418 slug=workmotion version=6 fingerprint=1b0e4253e9c334b5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:15.000317058Z level=debug msg="Alert rule evaluated" results="[{Instance:LoadBalancer=app/prod-backend-lb/9f56d270ed19a3e0, TargetGroup=targetgroup/prod-invoice-tg/ef6536027fc8e4d3 State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:LoadBalancer=app/prod-backend-lb/9f56d270ed19a3e0, TargetGroup=targetgroup/prod-invoice-tg/ef6536027fc8e4d3 Value:} C:{Var:C Labels:LoadBalancer=app/prod-backend-lb/9f56d270ed19a3e0, TargetGroup=targetgroup/prod-invoice-tg/ef6536027fc8e4d3 Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.999956023s EvaluationString:[ var='B' labels={LoadBalancer=app/prod-backend-lb/9f56d270ed19a3e0, TargetGroup=targetgroup/prod-invoice-tg/ef6536027fc8e4d3} value=null ], [ var='C' labels={LoadBalancer=app/prod-backend-lb/9f56d270ed19a3e0, TargetGroup=targetgroup/prod-invoice-tg/ef6536027fc8e4d3} value=null ]}]" duration=23.106187ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Nuuk, cluster=Nuuk, country=Greenland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.120.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=nuuk-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.999901208Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.999762177Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.180836ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Nuuk, cluster=Nuuk, country=Greenland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.120.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=nuuk-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.999723593Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Nuuk, cluster=Nuuk, country=Greenland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.120.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=nuuk-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.999706515Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.999676909Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, cluster=NoSpy Bucharest, country=Romania, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=85.9.20.143, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.999356927Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.999249967Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.997617914Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="deployment=testmarch20, environment_name=starbreeze-justice-pd3-prod, exported_namespace=accelbytetesting, version=2102smoketest" t=2024-05-29T13:44:14.999017576Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:59:10Z next_ends_at=2024-05-29T14:04:10Z + logger=ngalert.state.manager user=426229 slug=accelbyte instance="deployment=testmarch20, environment_name=starbreeze-justice-pd3-prod, exported_namespace=accelbytetesting, version=2102smoketest" t=2024-05-29T13:44:14.998995776Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.998860505Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=NoSpy Bucharest, cluster=NoSpy Bucharest, country=Romania, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=85.9.20.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.998878574Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.998760983Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Nicosia, cluster=Nicosia, country=Cyprus, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.253.162.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=nicosia-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.998703476Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.998522759Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.998507983Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.998495221Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, 
city=Nicosia, cluster=Nicosia, country=Cyprus, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.253.162.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=nicosia-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.998451832Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.998372873Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.998103893Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.998157315Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.99819275Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + level=debug component=discovery ts=2024-05-29T13:44:14.998214745Z caller=retry.go:58 user=328875 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=3 + logger=ngalert.state.manager.persist user=507549 slug=coindcx t=2024-05-29T13:44:14.998145731Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=507549 slug=coindcx instance="datasource_uid=4DXtZk24z, ref_id=A" t=2024-05-29T13:44:14.99812242Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=507549 slug=coindcx t=2024-05-29T13:44:14.998109179Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.997976136Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.606037ms + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.99809309Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.121542ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.59.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=newyork-s406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.998096269Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.998045178Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.998063785Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.997954154Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.997932358Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.997875552Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=193.229185ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, 
city=New York, cluster=New York-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.36.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=newyork-s416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.997911597Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.997870763Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.997845703Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.997730257Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.36.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=newyork-s416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.997716369Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=156.146.36.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=newyork-s416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.997700679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.997510253Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=9c9ccaec87c22e8a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.99737711Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[A0:{Var:A Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.997049289s EvaluationString:[ var='A0' metric='NoData' labels={} value=null ]}]" duration=425.465276ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.997401579Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.227.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=newyork-s422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.997252317Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.227.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=newyork-s422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.99723534Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.997186815Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.997181424Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.99720314Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=60603 slug=avalaratax t=2024-05-29T13:44:14.997053454Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.784177ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.227.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=newyork-s421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.997050833Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.99704889Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.997006015Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.996992735Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=fb085ace726b5b4e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.996883815Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.996650197s EvaluationString:}]" duration=237.318405ms + level=debug ts=2024-05-29T13:44:14.996946506Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=4947 slug=mediamath 
t=2024-05-29T13:44:14.996665115Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.9916ms + level=debug ts=2024-05-29T13:44:14.996647458Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.9966465Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.150.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=newyork-s417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.99663646Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.996557141Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.150.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=newyork-s415, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.99603393Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.995593562Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=a6e87073aca14ce4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.995607347Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=DUBAI Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc06cce8b38} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc06cce8b70} Threshold:{Var:Threshold Labels: Value:0xc06cce8ab0} compare:{Var:compare Labels:aggregatedBy=sum, name=DUBAI Query Value:0xc06cce8af0} sum:{Var:sum Labels:aggregatedBy=sum, name=DUBAI Query Value:0xc06cce8b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.995419213s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=100 ], [ var='Threshold' labels={} value=-20 ], [ var='compare' labels={aggregatedBy=sum, name=DUBAI Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=DUBAI Query} value=0 ]}]" duration=65.802979ms + level=debug ts=2024-05-29T13:44:14.995463081Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.192.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=newyork-s418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.995426279Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.995379217Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=New York, cluster=New York, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=154.16.192.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=newyork-s418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.99518994Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=92497 slug=regis24 t=2024-05-29T13:44:14.994788873Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.690581ms + level=debug ts=2024-05-29T13:44:14.994757087Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Naypyidaw, cluster=naypyidaw, country=Myanmar (Burma), datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=188.240.216.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=naypyidaw-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.994751147Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Naypyidaw, cluster=naypyidaw, country=Myanmar (Burma), datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=188.240.216.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=naypyidaw-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.994737495Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.994607642Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Nassau, cluster=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.238.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=nassau-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.994569682Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Nassau, cluster=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.238.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=nassau-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.994558279Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.994488067Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc 
msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.994224422Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.994280244Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.994336328Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.994328783Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.994296337Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.994234323Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.994301515Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:14.994280971Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.994138117Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.378388ms + level=info ts=2024-05-29T13:44:14.994222511Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhspzwkjzsyd alerts=1 + level=debug ts=2024-05-29T13:44:14.994150124Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.994041403Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Nassau, cluster=Nassau, country=Bahamas, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.238.143, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=nassau-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.993963035Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.993915955Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.993896209Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.99390283Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=400599 slug=unionai instance="datasource_uid=ZMy3ehLVk, ref_id=A" t=2024-05-29T13:44:14.993835816Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=340750 slug=aptoslabs t=2024-05-29T13:44:14.993804796Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.993722029Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.993698701Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.993557749Z caller=remote_instance_store.go:51 user=438855 slug=teckresources msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.993518223Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.993509191Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.993502193Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:14.993462907Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=438855 slug=teckresources t=2024-05-29T13:44:14.993417156Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=843304 slug=ppcgroup t=2024-05-29T13:44:14.993029607Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mumbai, cluster=Mumbai, country=India, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=23.26.220.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=mumbai-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.99296584Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mumbai, cluster=Mumbai, country=India, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=23.26.220.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=mumbai-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.992952056Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.992633544Z caller=remote_alert_sender.go:94 user=465668 slug=xpressinfra host=xpressinfra-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.214.234:10000 msg="sending alerts to grafana" rule_org_id=1 
rule_uid=c0c106b2-ed41-4268-b10a-c95f45248111 alerts=1 + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.992554023Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=108.236982ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.52.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=moscow-s410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.992510275Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.992172814Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.52.98, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=moscow-s410, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.992280404Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.991951316Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.991776486Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.52.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=moscow-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.991559974Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.99128912Z caller=remote_instance_store.go:51 user=237629 slug=ocrolus msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.991234255Z caller=remote_alert_sender.go:94 user=937416 slug=cambridgeuniversitypress host=cambridgeuniversitypress-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.166.108:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=nON5tQUnk alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.52.226, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=moscow-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.991246947Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, 
ip=146.70.52.226, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=moscow-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.991230055Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.991111917Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:14.991199468Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.991009154Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:14.990746482Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.646988ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.52.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=moscow-s407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.99074238Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.990584973Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.990464Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.990299421Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.990299759Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.990255036Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.990250154Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.990244668Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.9902679Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.990222269Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.990232868Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.990175332Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.52.13, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=moscow-s415, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.990099366Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.98995738Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Moscow, cluster=Moscow, country=Russian Federation, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.52.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=moscow-s411, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.989913815Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=montreal-s421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.989536775Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, 
job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Unknown, pod=cb-hub-stripe-6dcf89bd9-ktwz6, uid=723eae93-fb1b-4668-a2c8-61ad90064958" t=2024-05-29T13:44:14.989092614Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Unknown, pod=cb-hub-stripe-6dcf89bd9-ktwz6, uid=723eae93-fb1b-4668-a2c8-61ad90064958" t=2024-05-29T13:44:14.989079085Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.989001272Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.989321305Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Unknown, pod=cb-hub-frontend-5865df4d47-n6ctt, uid=e9070af6-d035-48ff-818a-746c3ff49d86" t=2024-05-29T13:44:14.988920996Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.7, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.989306767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.989279793Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.989224342Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Unknown, pod=cb-hub-backend-d54c69597-wg2cx, uid=c5c932ab-1081-485d-bd82-5acd0cb6fb48" t=2024-05-29T13:44:14.988721116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Succeeded, pod=cb-hub-strapi-69b44fc8cc-hppwz, uid=0128a5cc-8e58-4897-b583-1cb4b28b7f6a" t=2024-05-29T13:44:14.988573235Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Succeeded, pod=cb-hub-strapi-69b44fc8cc-hppwz, uid=0128a5cc-8e58-4897-b583-1cb4b28b7f6a" t=2024-05-29T13:44:14.988558345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Succeeded, pod=cb-hub-paypal-76f7686b7f-qx2dw, uid=6d9de381-3b0d-496a-a38e-e76bc918176c" t=2024-05-29T13:44:14.988515612Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.988477479Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=montreal-s420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.989116478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Succeeded, pod=cb-hub-frontend-5865df4d47-j27s9, uid=da9d84a2-2cc9-4350-95dd-93361f0515ec" t=2024-05-29T13:44:14.988385Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.989053664Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Succeeded, pod=cb-hub-backend-d54c69597-wg2cx, uid=c5c932ab-1081-485d-bd82-5acd0cb6fb48" t=2024-05-29T13:44:14.98828535Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.9882634Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.988950181Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.988803382Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.988206219Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Pending, pod=cb-hub-stripe-6dcf89bd9-ktwz6, uid=723eae93-fb1b-4668-a2c8-61ad90064958" t=2024-05-29T13:44:14.988172738Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Pending, pod=cb-hub-paypal-76f7686b7f-qx2dw, uid=6d9de381-3b0d-496a-a38e-e76bc918176c" t=2024-05-29T13:44:14.987982721Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.98877957Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:14.988738204Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:14.988683236Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.987895582Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.988647091Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Pending, pod=cb-hub-frontend-5865df4d47-j27s9, uid=da9d84a2-2cc9-4350-95dd-93361f0515ec" t=2024-05-29T13:44:14.987865115Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.987843046Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Pending, pod=cb-hub-coinpayment-567557895c-cfcg7, uid=c196eeb2-e36d-4834-88a8-303c5ae0a3fa" t=2024-05-29T13:44:14.987810026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Pending, pod=cb-hub-backend-d54c69597-wg2cx, uid=c5c932ab-1081-485d-bd82-5acd0cb6fb48" t=2024-05-29T13:44:14.987736416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.988445788Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.987625069Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Failed, pod=cb-hub-paypal-76f7686b7f-qx2dw, uid=6d9de381-3b0d-496a-a38e-e76bc918176c" t=2024-05-29T13:44:14.987492616Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.987422243Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb-hub, phase=Failed, pod=cb-hub-frontend-5865df4d47-n6ctt, uid=e9070af6-d035-48ff-818a-746c3ff49d86" t=2024-05-29T13:44:14.987383258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.987363142Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + level=debug ts=2024-05-29T13:44:14.988014622Z caller=remote_instance_store.go:51 user=542095 slug=intelligencefusion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.988054101Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s427, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.988041613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.987257252Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + level=debug ts=2024-05-29T13:44:14.98789319Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:14.987205496Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + level=debug ts=2024-05-29T13:44:14.987742876Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.987646064Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal-2, country=Canada, datacenter=TSS, environment=production, instance=10.0.0.203:9998, ip=104.200.151.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s426, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.987639437Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.987546163Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.987591754Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.987591533Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=iam-prod-12022050309120716060000001f" t=2024-05-29T13:44:14.987558557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=455282 slug=rockwool t=2024-05-29T13:44:14.987365599Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.830436ms + level=debug ts=2024-05-29T13:44:14.987445961Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.987434334Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.987084405Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal, country=Canada, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=140.228.24.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s433, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.986693821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.986651218Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.98662544Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.742089ms + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.986551728Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.98646839Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.986207577Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.986248043Z caller=remote_instance_store.go:51 user=417450 slug=legitsecurity msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=417450 slug=legitsecurity t=2024-05-29T13:44:14.986210123Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=417450 slug=legitsecurity instance= t=2024-05-29T13:44:14.986196357Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=DTySajW4z, ref_id=A" t=2024-05-29T13:44:14.986232918Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.98619819Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=417450 slug=legitsecurity instance= t=2024-05-29T13:44:14.986183727Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=417450 slug=legitsecurity t=2024-05-29T13:44:14.986141447Z level=debug msg="State manager processing evaluation 
results" resultCount=1 + logger=ngalert.scheduler user=417450 slug=legitsecurity version=102 fingerprint=2c938834028475cd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.986044085Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc0035fbdb0} C:{Var:C Labels: Value:0xc0035fbdb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.985641899s EvaluationString:[ var='B' labels={} value=18001 ], [ var='C' labels={} value=0 ]}]" duration=258.456989ms + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=3e076943af9df8a9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.986042791Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=DTySajW4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.985725457s EvaluationString:}]" duration=21.416914ms + level=debug ts=2024-05-29T13:44:14.986021497Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.985990576Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.98600022Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.985968577Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=JOHANNESBURG Query" t=2024-05-29T13:44:14.985954637Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montreal, cluster=Montreal, country=Canada, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=140.228.21.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montreal-s430, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.985817445Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=a7eff4c65e9737b2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.985691423Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=JOHANNESBURG Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0684d2038} Threshold:{Var:Threshold Labels: Value:0xc0684d2080} compare:{Var:compare Labels:aggregatedBy=sum, name=JOHANNESBURG Query Value:0xc0684d20b0} sum:{Var:sum Labels:aggregatedBy=sum, name=JOHANNESBURG Query Value:0xc0684d20e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.985196798s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={aggregatedBy=sum, name=JOHANNESBURG Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=JOHANNESBURG Query} value=0 ]}]" duration=30.372924ms + logger=ngalert.state.manager user=314947 slug=h10n instance= t=2024-05-29T13:44:14.985506316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=314947 slug=h10n version=1 
fingerprint=41f9b4d4e5f8e5d2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.98535921Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.98501501s EvaluationString:}]" duration=19.370973ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Montevideo, cluster=Montevideo, country=Uruguay, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=84.247.100.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=montevideo-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.985411756Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.985308297Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.985279598Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=612525 slug=adleyeview version=268 fingerprint=3f11fb5125539ed2 attempt=1 now=2024-05-29T13:43:50Z t=2024-05-29T13:44:14.985082396Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc00a962af0} D:{Var:D Labels: Value:0xc00a962ae8}] EvaluatedAt:2024-05-29 13:43:50 +0000 UTC EvaluationDuration:24.978958731s EvaluationString:[ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=20.843886557s + level=debug ts=2024-05-29T13:44:14.985150054Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Monaco, cluster=Monaco, country=Monaco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.233.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=monaco-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.985231943Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Monaco, cluster=Monaco, country=Monaco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.233.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=monaco-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.985217765Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID8dashY7eX4aVMkactualerrorstrconv.ParseInt: parsing "": invalid syntax + logger=ngalert.state.manager.persist user=171235 slug=circleslabs t=2024-05-29T13:44:14.985030593Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=35.98606ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Monaco, cluster=Monaco, country=Monaco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.233.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem,
role=vpn, server=monaco-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.985018982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Monaco, cluster=Monaco, country=Monaco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.233.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=monaco-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.984864998Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.98476727Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Monaco, cluster=Monaco, country=Monaco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.233.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=monaco-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.984675279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Monaco, cluster=Monaco, country=Monaco, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.233.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=monaco-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.984661346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.984285786Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Minsk, cluster=Minsk, country=Belarus, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=45.132.194.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=minsk-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.984134738Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.984059863Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.984030272Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Minsk, cluster=Minsk, country=Belarus, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=45.132.194.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=minsk-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.984003725Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.983737508Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Minsk, cluster=Minsk, country=Belarus, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=45.132.194.14, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=minsk-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.983739576Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.983713218Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Milano, cluster=Milano, country=Italy, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.58.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=milano-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.983603431Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.983542913Z caller=remote_instance_store.go:51 user=937416 slug=cambridgeuniversitypress msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:14.983504691Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Milano, cluster=Milano, country=Italy, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.58.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=milano-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.983379908Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.983323997Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.003658ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.983338466Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.983196052Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-rout/95374aa4420b3d6d" t=2024-05-29T13:44:14.979535059Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.983156205Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.983107966Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=320906 slug=techcyte t=2024-05-29T13:44:14.979269005Z level=debug msg="State manager processing evaluation results" resultCount=8 + level=debug ts=2024-05-29T13:44:14.982949728Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=320906 slug=techcyte version=5 fingerprint=c99a36aca135fb10 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.97905624Z level=debug msg="Alert rule evaluated" results="[{Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185 Value:0xc00a5fb228} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185 Value:0xc00a5fb220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.97839605s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185} value=0.217630915380395 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-grpc/b951e7af3da2327e State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-grpc/b951e7af3da2327e Value:0xc00a5fb690} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-grpc/b951e7af3da2327e Value:0xc00a5fb698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.97841712s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-grpc/b951e7af3da2327e} value=0.04430565846055024 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-grpc/b951e7af3da2327e} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c Value:0xc00a5fb768} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c Value:0xc00a5fb760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978424805s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, 
LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c} value=0.8798025426456729 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c} value=0 ]} {Instance:AvailabilityZone=us-west-2b, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c Value:0xc00a5fb830} C:{Var:C Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c Value:0xc00a5fb838}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978432497s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c} value=0.8294053938577669 ], [ var='C' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185 Value:0xc00a5fb900} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185 Value:0xc00a5fb908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978439161s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185} value=0.24771490337530452 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/prod-vetcyte-otel-firehose/4617c3469aa4d185} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c Value:0xc00a5fb9d8} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c Value:0xc00a5fb9d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978447803s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c} value=0.8780688103393222 ], [ var='C' 
labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-grundium/5a13b46dc2bb304c} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-rout/95374aa4420b3d6d State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-rout/95374aa4420b3d6d Value:0xc00a5fbaa0} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-rout/95374aa4420b3d6d Value:0xc00a5fbaa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978453564s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-rout/95374aa4420b3d6d} value=0.008084 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-rout/95374aa4420b3d6d} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-scan/06041452bf618803 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-scan/06041452bf618803 Value:0xc00a5fbb70} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-scan/06041452bf618803 Value:0xc00a5fbb78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978460273s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-scan/06041452bf618803} value=0.1979383680555425 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/zprod-vetscanimagyst-internal/81408d90ebc4c546, TargetGroup=targetgroup/zprod-vetscanimagyst-sim-scan/06041452bf618803} value=0 ]}]" duration=114.067474ms + level=debug ts=2024-05-29T13:44:14.982956686Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Miami, cluster=Miami-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.40.89, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=miami-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.982956508Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Miami, cluster=Miami-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.40.89, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=miami-s403, server_type=1G, 
service_name=cpz_vpn" t=2024-05-29T13:44:14.982946688Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.982799371Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.982835235Z caller=remote_instance_store.go:51 user=344017 slug=descript msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=344017 slug=descript instance="resource.label.project_id=production-273614, resource.type=k8s_container" t=2024-05-29T13:44:14.982764897Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Miami, cluster=Miami, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.153.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=miami-s456, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.982811443Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.982777854Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.982706361Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance="datasource_uid=Zj00bGUnz, ref_id=A" t=2024-05-29T13:44:14.982601434Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.982570518Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.982591343Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance="datasource_uid=Zj00bGUnz, ref_id=A" t=2024-05-29T13:44:14.982581023Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:14.982560899Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:14.982539722Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=472647 slug=planet version=10 fingerprint=fc38c80a4ecf681a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.982475118Z level=debug msg="Alert rule evaluated" results="[{Instance:namespace=next State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:namespace=next Value:0xc02415f718} C:{Var:C Labels:namespace=next Value:0xc02415f730}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.982144477s EvaluationString:[ var='A' labels={namespace=next} value=NaN ], [ var='C' labels={namespace=next} value=0 ]}]" duration=22.471969ms
+level=debug ts=2024-05-29T13:44:14.982419748Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.982361729Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Miami, cluster=Miami, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.152.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=miami-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.982453661Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Miami, cluster=Miami, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.152.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=miami-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.982401604Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.982106751Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.982096229Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.981902396Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Miami, cluster=Miami, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.152.8, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=miami-s455, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.981863723Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mexico City, cluster=Mexico City, country=Mexico, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=77.81.142.218, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=mexicocity-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.981463953Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mexico City, cluster=Mexico City, country=Mexico, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=77.81.142.218, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=mexicocity-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.981296912Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.981219766Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mexico City, cluster=Mexico City, country=Mexico, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=77.81.142.206, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=mexicocity-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.981071635Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.980477857Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mexico City, cluster=Mexico City, country=Mexico, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=77.81.142.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=mexicocity-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.980510243Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.980433894Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=260827 slug=walley t=2024-05-29T13:44:14.980310983Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.980150895Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260827 slug=walley t=2024-05-29T13:44:14.980230757Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.980268411Z caller=remote_instance_store.go:51 user=332534 slug=adevintakijiji msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.9801166Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mexico City, cluster=Mexico City, country=Mexico, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=77.81.142.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=mexicocity-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.980021496Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Mexico City, cluster=Mexico City, country=Mexico, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=77.81.142.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=mexicocity-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.980009956Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.979989337Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.979792616Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.979733977Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=332534 slug=adevintakijiji t=2024-05-29T13:44:14.979449234Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.979583025Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=332534 slug=adevintakijiji version=22 fingerprint=7b80ce802ac86b5c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.979220167Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.database_id=ca-kijiji-production-up0f:msdbmaster-canal, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resource.label.database_id=ca-kijiji-production-up0f:msdbmaster-canal, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database Value:0xc143ba83c0} C:{Var:C Labels:resource.label.database_id=ca-kijiji-production-up0f:msdbmaster-canal, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database Value:0xc143ba83e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978673921s EvaluationString:[ var='B' labels={resource.label.database_id=ca-kijiji-production-up0f:msdbmaster-canal, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database} value=0.02146746049976274 ], [ var='C' labels={resource.label.database_id=ca-kijiji-production-up0f:msdbmaster-canal, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database} value=0 ]}]" duration=202.78717ms
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.979329972Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.239606ms
+logger=ngalert.state.manager.persist user=306551 slug=teckresourcesalerts t=2024-05-29T13:44:14.9792882Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.97910081Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.979176323Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.979084508Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.979067448Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:14.979162822Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.979007323Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.979008085Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.97662448Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=679029 slug=joveoprodaws version=1667 fingerprint=00e9826d75bbb4b2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.978898609Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.978568829s EvaluationString:}]" duration=11.123339ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Marseille, cluster=Marseille, country=France, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.18.63, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=marseille-s407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.978834145Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Marseille, cluster=Marseille, country=France, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.18.63, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=marseille-s407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.97881929Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.978747344Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.978612187Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.978599914Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=901230 slug=integromonitor t=2024-05-29T13:44:14.978531157Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.978509002Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.97851799Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.978511854Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.978420658Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.978390595Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.97834888Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.978243714Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Marseille, cluster=Marseille, country=France, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.18.24, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=marseille-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.978240413Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.978191539Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.977957316Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Marseille, cluster=Marseille, country=France, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.18.11, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=marseille-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.977814333Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.977777972Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.977557742Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.977434623Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manila, cluster=Manila-2, country=Philippines, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.214.125.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=manila-s451, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.977398246Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.977317052Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.977293286Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=60603 slug=avalaratax instance="datasource_uid=000000004, ref_id=C" t=2024-05-29T13:44:14.977252532Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:45:40Z next_ends_at=2024-05-29T13:46:10Z
+logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:14.977127668Z level=debug msg="Setting next state" handler=resultError
+level=debug ts=2024-05-29T13:44:14.977137367Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.977108052Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.977070357Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manila, cluster=Manila-2, country=Philippines, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.214.125.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=manila-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.976945937Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manila, cluster=Manila-2, country=Philippines, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.214.125.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=manila-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.976930556Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.976697177Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.976684274Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.976645132Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manila, cluster=Manila, country=Philippines, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.18.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=manila-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.976526859Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.976530609Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.976273323Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.976314366Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manchester, cluster=n/a, country=United Kingdom, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.121.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=manchester-s408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.976145927Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.976199178Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.976086968Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.976063868Z caller=remote_instance_store.go:51 user=150145 slug=pleasant msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=120621 slug=jdall t=2024-05-29T13:44:14.975965977Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.337473ms
+logger=ngalert.state.manager user=811546 slug=fyld instance="DBInstanceIdentifier=sitestream-transgrid-db-encrypted" t=2024-05-29T13:44:14.975928708Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.975886596Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.975741589Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manchester, cluster=Manchester-2, country=United Kingdom, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.133.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=manchester-s407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.97554573Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.97549226Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.975384471Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manchester, cluster=Manchester-2, country=United Kingdom, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.121.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=manchester-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.975094984Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=152655 slug=orbweaver instance= t=2024-05-29T13:44:14.974660978Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.974681841Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=152655 slug=orbweaver t=2024-05-29T13:44:14.974626578Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=152655 slug=orbweaver version=1 fingerprint=4e22dc09a6441400 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.974571152Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.97428432s EvaluationString:}]" duration=74.534531ms
+level=debug ts=2024-05-29T13:44:14.974450295Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manchester, cluster=Manchester, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.133.173.60, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=manchester-s418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.974327534Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.974288064Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Manchester, cluster=Manchester, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.133.173.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=manchester-s417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.974129874Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.974077821Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.973993782Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.973837676Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:14.973773123Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.848142ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.973632896Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.973533926Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.973409258Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.973300757Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.973325014Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.973254343Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=845543 slug=deliveryhero t=2024-05-29T13:44:14.973168615Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.142292ms
+level=debug ts=2024-05-29T13:44:14.973142225Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.97308598Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.973076871Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.972798267Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.972855701Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.972802345Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.972755977Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.972641927Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.972727897Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.972593588Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Madrid, cluster=Madrid, country=Spain, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.213.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=madrid-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.972590438Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.972544834Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.97236229Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Madrid, cluster=Madrid, country=Spain, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.213.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=madrid-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.972418161Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.972360989Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:14.972305523Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=344017 slug=descript instance="task_queue=temporalTranscriptionPipeline" t=2024-05-29T13:44:14.972277157Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=344017 slug=descript t=2024-05-29T13:44:14.972210346Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Madrid, cluster=Madrid, country=Spain, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.93.3.104, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=madrid-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.972249038Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.972136468Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Madrid, cluster=Madrid, country=Spain, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.93.3.104, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=madrid-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.972047964Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.972014658Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Macau, cluster=Macau, country=Macao, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=84.252.92.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=macau-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.971860514Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.969588867Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.971711793Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.971672115Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.971624605Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.971531729Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0" t=2024-05-29T13:44:14.971233446Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:14.971201449Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.971328978Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.971228118Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.971370923Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1" t=2024-05-29T13:44:14.971150046Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata" t=2024-05-29T13:44:14.971134038Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.971291154Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.971254425Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.971222496Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.970570445Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Luxembourg, cluster=Luxembourg, country=Luxembourg, datacenter=Altushost, environment=production, instance=10.0.0.203:9998, ip=37.46.113.223, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=luxembourg-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.971142402Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.971081765Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:14.971062175Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.971023237Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=84360 slug=sib t=2024-05-29T13:44:14.971030174Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.201292ms
+logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=811fea96508f33b9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.970969057Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.787337ms
+level=error ts=2024-05-29T13:44:14.970913422Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.45.97, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=losangeles-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.970970612Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.45.97, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=losangeles-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.970956908Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2" t=2024-05-29T13:44:14.970907139Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata" t=2024-05-29T13:44:14.970891384Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:14.970855989Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.970785146Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1" t=2024-05-29T13:44:14.970829013Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=206107 slug=hydrolix version=3 fingerprint=8e667ea0dece0617 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.970566847Z level=debug msg="Alert rule evaluated" results="[{Instance:persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1 Value:0xc016503f40} B:{Var:B Labels:persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1 Value:0xc016503f48} C:{Var:C Labels:persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1 Value:0xc016503ef8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969802931s EvaluationString:[ var='A' labels={persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1} value=0.008579895901386194 ], [ var='B' labels={persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1} value=0.008579895901386194 ], [ var='C' labels={persistentvolume=pvc-14a8cc803ab542ce, persistentvolumeclaim=grafana-repo1} value=0 ]} {Instance:persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2 Value:0xc016503fe0} B:{Var:B Labels:persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2 Value:0xc016503fe8} C:{Var:C Labels:persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2 Value:0xc016503f98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969814128s EvaluationString:[ var='A' labels={persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2} value=0.0032172163152284832 ], [ var='B' labels={persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2} value=0.0032172163152284832 ], [ var='C' labels={persistentvolume=pvc-250ed32722eb4889, persistentvolumeclaim=data-zookeeper-2} value=0 ]} {Instance:persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata Value:0xc00b272028} B:{Var:B Labels:persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata Value:0xc00b272060} C:{Var:C Labels:persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata Value:0xc00b272068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969821085s EvaluationString:[ var='A' labels={persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata} value=0.026228983286837204 ], [ var='B' labels={persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata} value=0.026228983286837204 ], [ var='C' labels={persistentvolume=pvc-38bc2fceaae84d40, persistentvolumeclaim=grafana-grafana-gjtm-pgdata} value=0 ]} {Instance:persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2 Value:0xc00b272158} B:{Var:B Labels:persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2 Value:0xc00b2720b8} C:{Var:C Labels:persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2 Value:0xc00b272150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969827037s EvaluationString:[ var='A' labels={persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2} value=0.0026218414502251347 ], [ var='B' labels={persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2} value=0.0026218414502251347 ], [ var='C' labels={persistentvolume=pvc-45bacb849ad74a24, persistentvolumeclaim=data-rabbitmq-2} value=0 ]} {Instance:persistentvolume=pvc-6a39a516fb6f4811, persistentvolumeclaim=data-redpanda-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-6a39a516fb6f4811, persistentvolumeclaim=data-redpanda-0 Value:0xc00b272238} B:{Var:B Labels:persistentvolume=pvc-6a39a516fb6f4811, persistentvolumeclaim=data-redpanda-0 Value:0xc00b272280} C:{Var:C Labels:persistentvolume=pvc-6a39a516fb6f4811, persistentvolumeclaim=data-redpanda-0 Value:0xc00b272288}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969833951s EvaluationString:[ var='A' labels={persistentvolume=pvc-6a39a516fb6f4811, persistentvolumeclaim=data-redpanda-0} value=0.20290983693136078 ], [ var='B' labels={persistentvolume=pvc-6a39a516fb6f4811, persistentvolumeclaim=data-redpanda-0} value=0.20290983693136078 ], [ var='C' labels={persistentvolume=pvc-6a39a516fb6f4811, persistentvolumeclaim=data-redpanda-0} value=0 ]} {Instance:persistentvolume=pvc-6e6bc758869b492f, persistentvolumeclaim=main-main-mbh2-pgdata State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-6e6bc758869b492f, persistentvolumeclaim=main-main-mbh2-pgdata Value:0xc00b2722e8} B:{Var:B Labels:persistentvolume=pvc-6e6bc758869b492f, persistentvolumeclaim=main-main-mbh2-pgdata Value:0xc00b272350} C:{Var:C Labels:persistentvolume=pvc-6e6bc758869b492f, persistentvolumeclaim=main-main-mbh2-pgdata Value:0xc00b272358}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969839647s EvaluationString:[ var='A' labels={persistentvolume=pvc-6e6bc758869b492f, persistentvolumeclaim=main-main-mbh2-pgdata} value=0.004476633225703931 ], [ var='B' labels={persistentvolume=pvc-6e6bc758869b492f, persistentvolumeclaim=main-main-mbh2-pgdata} value=0.004476633225703931 ], [ var='C' labels={persistentvolume=pvc-6e6bc758869b492f, persistentvolumeclaim=main-main-mbh2-pgdata} value=0 ]} {Instance:persistentvolume=pvc-7b1f76661e924fda, persistentvolumeclaim=data-prometheus-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-7b1f76661e924fda, persistentvolumeclaim=data-prometheus-0 Value:0xc00b272398} B:{Var:B Labels:persistentvolume=pvc-7b1f76661e924fda, persistentvolumeclaim=data-prometheus-0 Value:0xc00b2723f0} C:{Var:C Labels:persistentvolume=pvc-7b1f76661e924fda, persistentvolumeclaim=data-prometheus-0 Value:0xc00b2723f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969844323s EvaluationString:[ var='A' labels={persistentvolume=pvc-7b1f76661e924fda, persistentvolumeclaim=data-prometheus-0} value=0.46331116630511887 ], [ var='B' labels={persistentvolume=pvc-7b1f76661e924fda, persistentvolumeclaim=data-prometheus-0} value=0.46331116630511887 ], [ var='C' labels={persistentvolume=pvc-7b1f76661e924fda, persistentvolumeclaim=data-prometheus-0} value=0 ]} {Instance:persistentvolume=pvc-93a34a5a456f4134, persistentvolumeclaim=main-repo1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-93a34a5a456f4134, persistentvolumeclaim=main-repo1 Value:0xc00b272498} B:{Var:B Labels:persistentvolume=pvc-93a34a5a456f4134, persistentvolumeclaim=main-repo1 Value:0xc00b272500} C:{Var:C Labels:persistentvolume=pvc-93a34a5a456f4134, persistentvolumeclaim=main-repo1 Value:0xc00b272490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969848858s EvaluationString:[ var='A' labels={persistentvolume=pvc-93a34a5a456f4134, persistentvolumeclaim=main-repo1} value=0.005990850720846325 ], [ var='B' labels={persistentvolume=pvc-93a34a5a456f4134, persistentvolumeclaim=main-repo1} value=0.005990850720846325 ], [ var='C' labels={persistentvolume=pvc-93a34a5a456f4134, persistentvolumeclaim=main-repo1} value=0 ]} {Instance:persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1 Value:0xc00b272560} B:{Var:B Labels:persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1 Value:0xc00b272568} C:{Var:C Labels:persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1 Value:0xc00b2725c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969852872s EvaluationString:[ var='A' labels={persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1} value=0.003217607751694626 ], [ var='B' labels={persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1} value=0.003217607751694626 ], [ var='C' labels={persistentvolume=pvc-94ae969cf07f4e38, persistentvolumeclaim=data-zookeeper-1} value=0 ]} {Instance:persistentvolume=pvc-a7e3ab3a684641a4, persistentvolumeclaim=grafana-grafana-kv8q-pgdata State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-a7e3ab3a684641a4, persistentvolumeclaim=grafana-grafana-kv8q-pgdata Value:0xc00b272640} B:{Var:B Labels:persistentvolume=pvc-a7e3ab3a684641a4, persistentvolumeclaim=grafana-grafana-kv8q-pgdata Value:0xc00b272600} C:{Var:C Labels:persistentvolume=pvc-a7e3ab3a684641a4, persistentvolumeclaim=grafana-grafana-kv8q-pgdata Value:0xc00b272608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969857028s EvaluationString:[ var='A' labels={persistentvolume=pvc-a7e3ab3a684641a4, persistentvolumeclaim=grafana-grafana-kv8q-pgdata} value=0.024615090736930034 ], [ var='B' labels={persistentvolume=pvc-a7e3ab3a684641a4, persistentvolumeclaim=grafana-grafana-kv8q-pgdata} value=0.024615090736930034 ], [ var='C' labels={persistentvolume=pvc-a7e3ab3a684641a4, persistentvolumeclaim=grafana-grafana-kv8q-pgdata} value=0 ]} {Instance:persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata Value:0xc00b2726b0} B:{Var:B Labels:persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata Value:0xc00b272670} C:{Var:C Labels:persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata Value:0xc00b272678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969862803s EvaluationString:[ var='A' labels={persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata} value=0.004113178392830402 ], [ var='B' labels={persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata} value=0.004113178392830402 ], [ var='C' labels={persistentvolume=pvc-ab6e4a5913724511, persistentvolumeclaim=main-main-zf2f-pgdata} value=0 ]} {Instance:persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1 Value:0xc00b272710} B:{Var:B Labels:persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1 Value:0xc00b272718} C:{Var:C Labels:persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1 Value:0xc00b272760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969868194s EvaluationString:[ var='A' labels={persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1} value=0.002335309957008533 ], [ var='B' labels={persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1} value=0.002335309957008533 ], [ var='C' labels={persistentvolume=pvc-b62e3a960e714e57, persistentvolumeclaim=data-rabbitmq-1} value=0 ]} {Instance:persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0 Value:0xc00b2727d0} B:{Var:B Labels:persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0 Value:0xc00b2727d8} C:{Var:C Labels:persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0 Value:0xc00b272840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969872263s EvaluationString:[ var='A' labels={persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0} value=0.003217607751694626 ], [ var='B' labels={persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0} value=0.003217607751694626 ], [ var='C' labels={persistentvolume=pvc-de9b64cc3a3c4960, persistentvolumeclaim=data-zookeeper-0} value=0 ]} {Instance:persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0 Value:0xc00b272940} B:{Var:B Labels:persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0 Value:0xc00b2728f0} C:{Var:C Labels:persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0 Value:0xc00b2728f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969878246s EvaluationString:[ var='A' labels={persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0} value=0.003331124326876067 ], [ var='B' labels={persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0} value=0.003331124326876067 ], [ var='C' labels={persistentvolume=pvc-df44ec6215b048f6, persistentvolumeclaim=data-rabbitmq-0} value=0 ]}]" duration=156.43666ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles-2, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.45.97, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=losangeles-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.970782608Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.970670402Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.145.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=losangeles-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.970587616Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.970552709Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.970563249Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.970540628Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.145.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=losangeles-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.970437438Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.970365425Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.145.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=losangeles-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.970321724Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.970316388Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=A" t=2024-05-29T13:44:14.970293821Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:14.970208418Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.970187451Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=61976fb26a52e879 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.970151529Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.969786604s EvaluationString:}]" duration=30.99091ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.970048702Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.969917724Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.306326ms
+logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.969846661Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.145.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=losangeles-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.969880648Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.969646982Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.969745401Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.969656689Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.969594716Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Los Angeles, cluster=Los Angeles, country=United States, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=102.129.145.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=losangeles-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.969649671Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=455282 slug=rockwool t=2024-05-29T13:44:14.969526517Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.969336826Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=455282 slug=rockwool t=2024-05-29T13:44:14.969294813Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=n/a, country=United Kingdom, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=37.120.133.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s455, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.969117545Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=n/a, country=United Kingdom, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=37.120.133.156, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=london-s455, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.968930614Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.968901001Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=n/a, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=143.244.37.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s439, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.968792522Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.968761205Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=info ts=2024-05-29T13:44:14.968683834Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhspyh8n401d alerts=1
+logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.968707921Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:14.968693518Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:14.968565241Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.96858388Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-2, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.63.30, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.968397817Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-2, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.31.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s473, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.968033553Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-2, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.31.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s473, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.968023126Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.967990849Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb15, instance_name=common1-w-2" t=2024-05-29T13:44:14.967914144Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-2, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.31.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=london-s473, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.967835592Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.967805503Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=stritzdev-w-1" t=2024-05-29T13:44:14.967783634Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.967737311Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.967604945Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-2, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.30.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s440, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.967666148Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=7cb9075e51b0b3d6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.967558461Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.967255141s EvaluationString:}]" duration=18.171749ms
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.967619247Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+level=debug ts=2024-05-29T13:44:14.967553912Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.967463501Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.967471546Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.967398864Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=farmstritz1-w-7" t=2024-05-29T13:44:14.967365256Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.967235942Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.967216012Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.967186924Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.231137ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-1, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=194.110.13.97, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=london-s423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.967116869Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.966947969Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=farmstritz1-w-3" t=2024-05-29T13:44:14.96699619Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.966810059Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.966653586Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Wales, STS_BMS=Wales_SEL_735" t=2024-05-29T13:44:14.96671583Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-1, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.101.209.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.966625929Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.96659375Z caller=remote_instance_store.go:51 user=698103 slug=vericast msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Sutton, STS_BMS=Sutton_SEL_735" t=2024-05-29T13:44:14.966554854Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common2-w-6" t=2024-05-29T13:44:14.966534526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.96653325Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common2-w-6" t=2024-05-29T13:44:14.966523584Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Palmer, STS_BMS=Palmer_SEL_735" t=2024-05-29T13:44:14.966430212Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.966421675Z caller=remote_instance_store.go:51 user=465668 slug=xpressinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Palmer, STS_BMS=Palmer_SEL_735" t=2024-05-29T13:44:14.966418107Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.966416722Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common2-w-3" t=2024-05-29T13:44:14.966299595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Huxley 2, STS_BMS=Huxley 2_SEL_735" t=2024-05-29T13:44:14.966321198Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common2-w-2" t=2024-05-29T13:44:14.96621079Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=HeliosSouth, STS_BMS=HeliosSouth_SEL_735" t=2024-05-29T13:44:14.966132346Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common2-w-1" t=2024-05-29T13:44:14.9661093Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common2-w-1" t=2024-05-29T13:44:14.966099219Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common1-w-5" t=2024-05-29T13:44:14.965899033Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common1-w-5" t=2024-05-29T13:44:14.965890029Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common1-w-1" t=2024-05-29T13:44:14.965686243Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-1, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.215.176.97, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=london-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.965676371Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=common1-w-0" t=2024-05-29T13:44:14.965617104Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.965483452Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Charlemont, STS_BMS=Charlemont_SEL_735" t=2024-05-29T13:44:14.965513144Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Charlemont, STS_BMS=Charlemont_SEL_735" t=2024-05-29T13:44:14.965478887Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush2-w-8" t=2024-05-29T13:44:14.965460904Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-1, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, 
ip=181.215.176.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.965414554Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush2-w-7" t=2024-05-29T13:44:14.965401894Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Brockelman, STS_BMS=Brockelman_SEL_735" t=2024-05-29T13:44:14.965351877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Brockelman, STS_BMS=Brockelman_SEL_735" t=2024-05-29T13:44:14.965339377Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:14.965150106Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.027014ms + logger=ngalert.state.manager.persist user=92497 slug=regis24 t=2024-05-29T13:44:14.965091259Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=92497 slug=regis24 instance= t=2024-05-29T13:44:14.965046111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-1, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.215.176.161, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=london-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.965211379Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.965128454Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=92497 slug=regis24 version=1 fingerprint=dd70affb51aa4a0c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.964846257Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.964452467s EvaluationString:}]" duration=239.563345ms + level=debug ts=2024-05-29T13:44:14.96488532Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-1, country=United Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.215.176.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=london-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.965023709Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush2-w-14" t=2024-05-29T13:44:14.964850684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=London, cluster=London-1, country=United 
Kingdom, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.215.176.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=london-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.964796718Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso t=2024-05-29T13:44:14.964721951Z level=debug msg="State manager processing evaluation results" resultCount=14 + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush2-w-13" t=2024-05-29T13:44:14.964772813Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=524410 slug=syso version=102 fingerprint=f05f942a655a2e2c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.964458677Z level=debug msg="Alert rule evaluated" results="[{Instance:STS=Amherst, STS_BMS=Amherst_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Amherst, STS_BMS=Amherst_SEL_735 Value:0xc008874b18} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Amherst, STS_BMS=Amherst_SEL_735 Value:0xc008874b38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963583657s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Amherst, STS_BMS=Amherst_SEL_735} value=9.469658 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Amherst, STS_BMS=Amherst_SEL_735} value=0 ]} {Instance:STS=Blacksmith, STS_BMS=Blacksmith_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Blacksmith, STS_BMS=Blacksmith_SEL_735 Value:0xc008874b90} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Blacksmith, STS_BMS=Blacksmith_SEL_735 Value:0xc008874bb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963606208s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Blacksmith, STS_BMS=Blacksmith_SEL_735} value=10.6088 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Blacksmith, STS_BMS=Blacksmith_SEL_735} value=0 ]} {Instance:STS=Brockelman, STS_BMS=Brockelman_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Brockelman, STS_BMS=Brockelman_SEL_735 Value:0xc008874ca0} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Brockelman, STS_BMS=Brockelman_SEL_735 Value:0xc008874c78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963616395s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Brockelman, STS_BMS=Brockelman_SEL_735} value=16.322883 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Brockelman, STS_BMS=Brockelman_SEL_735} value=0 ]} {Instance:STS=Charlemont, STS_BMS=Charlemont_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Charlemont, STS_BMS=Charlemont_SEL_735 Value:0xc008874ce8} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Charlemont, STS_BMS=Charlemont_SEL_735 Value:0xc008874d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963623747s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Charlemont, STS_BMS=Charlemont_SEL_735} value=8.604141 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Charlemont, STS_BMS=Charlemont_SEL_735} value=0 ]} {Instance:STS=Clark, STS_BMS=Clark_SEL_735 State:Normal Error: Results:map[] 
Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Clark, STS_BMS=Clark_SEL_735 Value:0xc008874d88} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Clark, STS_BMS=Clark_SEL_735 Value:0xc008874d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963630127s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Clark, STS_BMS=Clark_SEL_735} value=19.32975 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Clark, STS_BMS=Clark_SEL_735} value=0 ]} {Instance:STS=Dalton 1, STS_BMS=Dalton 1_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Dalton 1, STS_BMS=Dalton 1_SEL_735 Value:0xc008874de8} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Dalton 1, STS_BMS=Dalton 1_SEL_735 Value:0xc008874e28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.96363669s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Dalton 1, STS_BMS=Dalton 1_SEL_735} value=3.43693 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Dalton 1, STS_BMS=Dalton 1_SEL_735} value=0 ]} {Instance:STS=Dalton 2, STS_BMS=Dalton 2_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Dalton 2, STS_BMS=Dalton 2_SEL_735 Value:0xc008874ec8} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Dalton 2, STS_BMS=Dalton 2_SEL_735 Value:0xc008874e88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963644677s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Dalton 2, STS_BMS=Dalton 2_SEL_735} value=10.031398 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Dalton 2, STS_BMS=Dalton 2_SEL_735} value=0 ]} {Instance:STS=HeliosNorth, STS_BMS=HeliosNorth_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=HeliosNorth, STS_BMS=HeliosNorth_SEL_735 Value:0xc008874f18} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=HeliosNorth, STS_BMS=HeliosNorth_SEL_735 Value:0xc008874f48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963650979s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=HeliosNorth, STS_BMS=HeliosNorth_SEL_735} value=15.907758 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=HeliosNorth, STS_BMS=HeliosNorth_SEL_735} value=0 ]} {Instance:STS=HeliosSouth, STS_BMS=HeliosSouth_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=HeliosSouth, STS_BMS=HeliosSouth_SEL_735 Value:0xc008874f88} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=HeliosSouth, STS_BMS=HeliosSouth_SEL_735 Value:0xc008874fb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963729425s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=HeliosSouth, STS_BMS=HeliosSouth_SEL_735} value=13.970513 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=HeliosSouth, STS_BMS=HeliosSouth_SEL_735} value=0 ]} {Instance:STS=Huxley 1, STS_BMS=Huxley 1_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Huxley 1, STS_BMS=Huxley 1_SEL_735 Value:0xc008875048} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Huxley 1, STS_BMS=Huxley 1_SEL_735 Value:0xc008875008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963735465s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Huxley 1, STS_BMS=Huxley 1_SEL_735} value=12.232304 ], [ 
var='OVER_FIVE_MINTUES_STALE' labels={STS=Huxley 1, STS_BMS=Huxley 1_SEL_735} value=0 ]} {Instance:STS=Huxley 2, STS_BMS=Huxley 2_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Huxley 2, STS_BMS=Huxley 2_SEL_735 Value:0xc0088750c8} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Huxley 2, STS_BMS=Huxley 2_SEL_735 Value:0xc0088750a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963743593s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Huxley 2, STS_BMS=Huxley 2_SEL_735} value=9.164227 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Huxley 2, STS_BMS=Huxley 2_SEL_735} value=0 ]} {Instance:STS=Palmer, STS_BMS=Palmer_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Palmer, STS_BMS=Palmer_SEL_735 Value:0xc008875128} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Palmer, STS_BMS=Palmer_SEL_735 Value:0xc008875158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.96375682s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Palmer, STS_BMS=Palmer_SEL_735} value=12.291828 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Palmer, STS_BMS=Palmer_SEL_735} value=0 ]} {Instance:STS=Sutton, STS_BMS=Sutton_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Sutton, STS_BMS=Sutton_SEL_735 Value:0xc0088751f8} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Sutton, STS_BMS=Sutton_SEL_735 Value:0xc0088751c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963763412s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Sutton, STS_BMS=Sutton_SEL_735} value=17.480603 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Sutton, STS_BMS=Sutton_SEL_735} value=0 ]} {Instance:STS=Wales, STS_BMS=Wales_SEL_735 State:Normal Error: Results:map[] Values:map[LAST_MINIMUM_FRESHNESS:{Var:LAST_MINIMUM_FRESHNESS Labels:STS=Wales, STS_BMS=Wales_SEL_735 Value:0xc008875280} OVER_FIVE_MINTUES_STALE:{Var:OVER_FIVE_MINTUES_STALE Labels:STS=Wales, STS_BMS=Wales_SEL_735 Value:0xc008875248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.963769519s EvaluationString:[ var='LAST_MINIMUM_FRESHNESS' labels={STS=Wales, STS_BMS=Wales_SEL_735} value=15.24468 ], [ var='OVER_FIVE_MINTUES_STALE' labels={STS=Wales, STS_BMS=Wales_SEL_735} value=0 ]}]" duration=426.28822ms + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush2-w-11" t=2024-05-29T13:44:14.964617328Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.964545199Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Ljubljana, cluster=Ljubljana, country=Slovenia, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=195.80.150.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=ljubljana-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.964601381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush2-w-1" t=2024-05-29T13:44:14.964479858Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush2-w-0" t=2024-05-29T13:44:14.964394608Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-9" t=2024-05-29T13:44:14.964313756Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-9" t=2024-05-29T13:44:14.964304226Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.964122984Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.964117391Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.964113163Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.96407951Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-5" t=2024-05-29T13:44:14.963972033Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.963933461Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.963926479Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-4" t=2024-05-29T13:44:14.963899858Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.963812137Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-3" t=2024-05-29T13:44:14.963830137Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.963621016Z caller=remote_instance_store.go:51 user=213445 slug=gan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Lisbon, cluster=Lisbon, country=Portugal, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.59.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=lisbon-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.963651651Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.963549543Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.963491047Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Lima, cluster=Lima, country=Peru, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=84.247.98.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=lima-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.963477554Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=80822 slug=corescientific t=2024-05-29T13:44:14.963415386Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.174387ms + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-12" t=2024-05-29T13:44:14.963342256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.963280779Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-11" t=2024-05-29T13:44:14.963260341Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.963146401Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:14.963154244Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.863708ms + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-10" t=2024-05-29T13:44:14.963173139Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.962796102Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.963046656Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sdb, instance_name=candycrush1-w-1" t=2024-05-29T13:44:14.963074236Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.96302277Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.962956032Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.905182ms + level=debug ts=2024-05-29T13:44:14.962924831Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.960254237Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.962897173Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.962859439Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.962858544Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=stritzdev-w-0" t=2024-05-29T13:44:14.962870814Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=stritzdev-w-0" t=2024-05-29T13:44:14.962859817Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.962809956Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=stritzdev-m-2" t=2024-05-29T13:44:14.962763869Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.962822917Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.962745468Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.962702218Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.962690601Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=stritzdev-m-1" t=2024-05-29T13:44:14.962699703Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=stritzdev-m-1" t=2024-05-29T13:44:14.962689857Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.962601112Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=qa-w-1" t=2024-05-29T13:44:14.962554045Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=qa-w-0" t=2024-05-29T13:44:14.962465294Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=qa-m-2" t=2024-05-29T13:44:14.962392347Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=qa-m-2" t=2024-05-29T13:44:14.962381643Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=qa-m-1" t=2024-05-29T13:44:14.962301202Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.962192497Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=qa-m-0" t=2024-05-29T13:44:14.962196165Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Lapaz, cluster=Lapaz, country=Bolivia, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=84.247.90.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=lapaz-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.962166279Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.962028191Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-8" t=2024-05-29T13:44:14.962098557Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.962025749Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Lapaz, cluster=Lapaz, country=Bolivia, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=84.247.90.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=lapaz-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.961928254Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-5" t=2024-05-29T13:44:14.961869648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-5" t=2024-05-29T13:44:14.961854898Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-4" t=2024-05-29T13:44:14.961780086Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-4" t=2024-05-29T13:44:14.961771044Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Lagos, cluster=Lagos, country=Nigeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.65.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=lagos-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.961727026Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-3" t=2024-05-29T13:44:14.961708144Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.961665969Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-3" t=2024-05-29T13:44:14.961698482Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Lagos, cluster=Lagos, country=Nigeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.65.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=lagos-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.961606791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-1" t=2024-05-29T13:44:14.961573968Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-w-1" t=2024-05-29T13:44:14.961562842Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=farmstritz1-m-2" t=2024-05-29T13:44:14.961426643Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, cluster=Kuala Lumpur-2, country=Malaysia, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.15.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=kualalumpur-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.961454248Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.961208379Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common2-w-8" t=2024-05-29T13:44:14.961133937Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.96097426Z caller=remote_instance_store.go:51 user=635771 slug=sharedservices msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, 
instance_name=common2-w-7" t=2024-05-29T13:44:14.961071465Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:14.961067229Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common2-w-7" t=2024-05-29T13:44:14.961059896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common2-w-6" t=2024-05-29T13:44:14.961000287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=635771 slug=sharedservices instance= t=2024-05-29T13:44:14.96090492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=635771 slug=sharedservices instance= t=2024-05-29T13:44:14.960883229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.960848897Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.960751504Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common2-w-2" t=2024-05-29T13:44:14.960720889Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common2-w-2" t=2024-05-29T13:44:14.960710918Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Kiev, cluster=Kyiv, country=Ukraine, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.239.42.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=kiev-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.960352499Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common2-m-0" t=2024-05-29T13:44:14.960338247Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common2-m-0" t=2024-05-29T13:44:14.960328666Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common1-w-5" t=2024-05-29T13:44:14.960177379Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.960045771Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common1-w-4" t=2024-05-29T13:44:14.960070068Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.960007134Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common1-w-1" t=2024-05-29T13:44:14.959902397Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Kathmandu, cluster=kathmandu, country=Nepal, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=84.247.96.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=kathmandu-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.959900296Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Kathmandu, cluster=kathmandu, country=Nepal, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=84.247.96.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=kathmandu-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.959886006Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common1-w-0" t=2024-05-29T13:44:14.9598246Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.959681891Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common1-m-1" t=2024-05-29T13:44:14.959634114Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common1-m-1" t=2024-05-29T13:44:14.959622665Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Karachi, cluster=Karachi, country=Pakistan, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.12.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=karachi-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.959524716Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=common1-m-0" t=2024-05-29T13:44:14.959521535Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Johannesburg, cluster=Johannesburg, country=South Africa, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.30.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=johannesburg-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.959333658Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.959301948Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-4" t=2024-05-29T13:44:14.959023279Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.959007143Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-4" t=2024-05-29T13:44:14.959013749Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.95894829Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.958949398Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.252418ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Jerusalem, cluster=Jerusalem, country=Israel, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.88.26.51, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=jerusalem-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.958959127Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.958931143Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.958823922Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-15" t=2024-05-29T13:44:14.958804107Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.958696745Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.958666269Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=120621 slug=jdall t=2024-05-29T13:44:14.958627222Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=info ts=2024-05-29T13:44:14.958588903Z caller=remote_image_capturer.go:61 user=120621 slug=jdall rule_org_id=1 rule_uid=CGdM7Flnz dashboard=I14aw-NMz panel=3 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-12" t=2024-05-29T13:44:14.958543331Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-11" t=2024-05-29T13:44:14.958450574Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.958349101Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.95833054Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-1" t=2024-05-29T13:44:14.958294511Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.958179294Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-1" t=2024-05-29T13:44:14.958278014Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush2-w-0" t=2024-05-29T13:44:14.95820192Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.958067945Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.95815493Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Jakarta, cluster=Jakarta, country=Indonesia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=jakarta-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.958114991Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.958032375Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=fdafef79a5541ece attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.957962607Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C0:{Var:C Labels:aggregatedBy=sum, name=pao B Value:0xc00a4d4770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.95764746s EvaluationString:[ var='C0' metric='B' labels={aggregatedBy=sum, name=pao B} value=0 ]}]" duration=46.069272ms
+ logger=ngalert.state.manager user=120621 slug=jdall instance= t=2024-05-29T13:44:14.957972734Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Jakarta, cluster=Jakarta, country=Indonesia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=jakarta-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.95795557Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=120621 slug=jdall version=1 fingerprint=9c6ea6437838459b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.95790435Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.846402ms
+ level=error ts=2024-05-29T13:44:14.957880829Z caller=remote_rule_evaluator.go:110 user=120621 slug=jdall msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ level=debug ts=2024-05-29T13:44:14.957881956Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.957840461Z level=debug msg="Saving alert states done" count=29 max_state_save_concurrency=1 duration=475.227382ms
+ level=debug ts=2024-05-29T13:44:14.957737104Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-9" t=2024-05-29T13:44:14.957726166Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Istanbul, cluster=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.213.34.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=istanbul-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.957725853Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-7" t=2024-05-29T13:44:14.957566569Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Istanbul, cluster=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.213.34.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=istanbul-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.957530307Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.957492488Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-6" t=2024-05-29T13:44:14.95750552Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-6" t=2024-05-29T13:44:14.957497499Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-5" t=2024-05-29T13:44:14.957437261Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-4" t=2024-05-29T13:44:14.957363081Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Istanbul, cluster=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.213.34.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=istanbul-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.957313111Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-3" t=2024-05-29T13:44:14.957268309Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Istanbul, cluster=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.213.34.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=istanbul-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.957133544Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Istanbul, cluster=Istanbul, country=Turkey, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.213.34.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=istanbul-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.957122929Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.957099897Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-17" t=2024-05-29T13:44:14.957027277Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.956950586Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.956927176Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-16" t=2024-05-29T13:44:14.956918163Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-15" t=2024-05-29T13:44:14.956787774Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.956742551Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-13" t=2024-05-29T13:44:14.956612282Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-12" t=2024-05-29T13:44:14.956544302Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-12" t=2024-05-29T13:44:14.956535509Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.956452392Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.956442608Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-11" t=2024-05-29T13:44:14.956472757Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Huenenberg, cluster=Huenenberg, country=Switzerland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=102.129.143.25, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=huenenberg-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.956421609Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Huenenberg, cluster=Huenenberg, country=Switzerland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=102.129.143.25, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=huenenberg-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.956408027Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.956320224Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-10" t=2024-05-29T13:44:14.95636112Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-10" t=2024-05-29T13:44:14.956348426Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.956137886Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.648633ms
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-w-0" t=2024-05-29T13:44:14.95620102Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.956195222Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.956047498Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda15, instance_name=candycrush1-m-0" t=2024-05-29T13:44:14.955890148Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.95582203Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=stritzdev-w-1" t=2024-05-29T13:44:14.955775676Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.955589776Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.955525056Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Hong Kong, cluster=Hong Kong, country=Hong Kong, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.56.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=hongkong-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.955493963Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=stritzdev-m-0" t=2024-05-29T13:44:14.95541861Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183427 slug=kaiku instance= t=2024-05-29T13:44:14.955403511Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=qa-w-1" t=2024-05-29T13:44:14.95533374Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.955260659Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Hong Kong, cluster=Hong Kong, country=Hong Kong, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.56.160, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=hongkong-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.955283941Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.955248628Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=qa-m-2" t=2024-05-29T13:44:14.955190718Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=qa-m-1" t=2024-05-29T13:44:14.95510256Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=qa-m-1" t=2024-05-29T13:44:14.955093719Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=qa-m-0" t=2024-05-29T13:44:14.955037397Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=qa-m-0" t=2024-05-29T13:44:14.955027968Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Hong Kong, cluster=Hong Kong, country=Hong Kong, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.56.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=hongkong-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.954839012Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=farmstritz1-w-7" t=2024-05-29T13:44:14.954843921Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=farmstritz1-w-6" t=2024-05-29T13:44:14.954761288Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.954669522Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.954601633Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.954545511Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=farmstritz1-w-2" t=2024-05-29T13:44:14.954361596Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.86.78:9100" t=2024-05-29T13:44:14.953872345Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.85.193:9100" t=2024-05-29T13:44:14.9537742Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.85.193:9100" t=2024-05-29T13:44:14.953762776Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.84.96:9100" t=2024-05-29T13:44:14.95374134Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.954266236Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.83.7:9100" t=2024-05-29T13:44:14.953699617Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.83.38:9100" t=2024-05-29T13:44:14.95366951Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.83.18:9100" t=2024-05-29T13:44:14.953650078Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=farmstritz1-w-1" t=2024-05-29T13:44:14.954245536Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.82.212:9100" t=2024-05-29T13:44:14.953504905Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.82.15:9100" t=2024-05-29T13:44:14.953481209Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.81.71:9100" t=2024-05-29T13:44:14.953445521Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.80.28:9100" t=2024-05-29T13:44:14.953372508Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Helsinki, cluster=Helsinki, country=Finland, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=188.126.88.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=helsinki-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.954031865Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Helsinki, cluster=Helsinki, country=Finland, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=188.126.88.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=helsinki-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.954016217Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.953981417Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.953931821Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.953906262Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=farmstritz1-m-0" t=2024-05-29T13:44:14.953901484Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.953707959Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.953809979Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.953613097Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common2-w-6" t=2024-05-29T13:44:14.953669896Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.953615716Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.953359959Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.953543636Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common2-w-5" t=2024-05-29T13:44:14.953561545Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.953475184Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common2-w-4" t=2024-05-29T13:44:14.953491639Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common2-w-3" t=2024-05-29T13:44:14.953419649Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common2-w-1" t=2024-05-29T13:44:14.953273101Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.79.114:9100" t=2024-05-29T13:44:14.953276225Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common2-w-0" t=2024-05-29T13:44:14.953188678Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.77.245:9100" t=2024-05-29T13:44:14.953159619Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.77.228:9100" t=2024-05-29T13:44:14.953079878Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.77.180:9100" t=2024-05-29T13:44:14.953050382Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.953057038Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.951107917Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.906668ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Hanoi, cluster=Hanoi, country=Vietnam, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=173.239.247.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=hanoi-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.953053804Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.953005198Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.757738ms
+ level=debug ts=2024-05-29T13:44:14.952991675Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.952985421Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.76.148:9100" t=2024-05-29T13:44:14.952992662Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.73.146:9100" t=2024-05-29T13:44:14.952867427Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.73.146:9100" t=2024-05-29T13:44:14.952860078Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.952835518Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.72.194:9100" t=2024-05-29T13:44:14.95280176Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.952818612Z caller=remote_instance_store.go:51 user=419587 slug=greenpass msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.72.183:9100" t=2024-05-29T13:44:14.952771412Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common1-w-5" t=2024-05-29T13:44:14.952799904Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.72.183:9100" t=2024-05-29T13:44:14.95276142Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.71.245:9100" t=2024-05-29T13:44:14.95274097Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common1-w-4" t=2024-05-29T13:44:14.952729777Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.70.80:9100" t=2024-05-29T13:44:14.95269928Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Guatemala City, cluster=Guatemala City, country=Guatemala, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=84.247.94.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=guatemalacity-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.952631484Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.68.137:9100" t=2024-05-29T13:44:14.95263833Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=sandbox-v2, Consumer Group=ledger-xfer-consumer-v1, Topic=treasury.transfers.v2" t=2024-05-29T13:44:14.952585682Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.67.84:9100" t=2024-05-29T13:44:14.952596612Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=sandbox-v2, Consumer Group=ledger-xfer-consumer-v1, Topic=treasury.transfers.v2" t=2024-05-29T13:44:14.952567379Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.67.80:9100" t=2024-05-29T13:44:14.952564088Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.67.68:9100" t=2024-05-29T13:44:14.952533476Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.952495241Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.67.242:9100" t=2024-05-29T13:44:14.952459595Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:14.952407811Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.95240902Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=245291 slug=pismo version=69 fingerprint=989f081f1fa49ff5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.952381658Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.952193231s EvaluationString:}]" duration=42.68049ms
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=common1-m-2" t=2024-05-29T13:44:14.952446684Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=87.249.132.26, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s435, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.952403884Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.66.11:9100" t=2024-05-29T13:44:14.952363888Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.65.27:9100" t=2024-05-29T13:44:14.952341794Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.65.27:9100" t=2024-05-29T13:44:14.952329922Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.65.227:9100" t=2024-05-29T13:44:14.952306658Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.65.206:9100" t=2024-05-29T13:44:14.952272491Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.65.206:9100" t=2024-05-29T13:44:14.952261852Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="cluster=prd-use1-prd-eks, instance=10.21.64.9:9100" t=2024-05-29T13:44:14.9522126Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-9" t=2024-05-29T13:44:14.952233354Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.952180643Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=112387 slug=lucidhq version=6 fingerprint=aa0de6696cf14a7a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.951494634Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=prd-use1-prd-eks, instance=10.21.64.9:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.64.9:9100 Value:0xc0116fb3f0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.64.9:9100 Value:0xc0116fb418} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.64.9:9100 Value:0xc0116fb470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.94854432s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.64.9:9100} value=27.074306759098747 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.64.9:9100} value=27.074306759098747 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.64.9:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.65.206:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.65.206:9100 Value:0xc0116fb510} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.65.206:9100 Value:0xc0116fb568} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.65.206:9100 Value:0xc0116fb5a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948574164s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.65.206:9100} value=33.731249999958294 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.65.206:9100} value=33.731249999958294 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.65.206:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.65.227:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.65.227:9100 Value:0xc0116fb600} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.65.227:9100 Value:0xc0116fb628} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.65.227:9100 Value:0xc0116fb650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948603255s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.65.227:9100} value=42.11458333332605 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.65.227:9100} value=42.11458333332605 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.65.227:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.65.27:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.65.27:9100 Value:0xc0116fb780} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.65.27:9100 Value:0xc0116fb7a8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.65.27:9100 Value:0xc0116fb8b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948619659s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.65.27:9100} value=32.270833333334 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.65.27:9100} value=32.270833333334 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.65.27:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.66.11:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.66.11:9100 Value:0xc0116fb920} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.66.11:9100 Value:0xc0116fb958} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.66.11:9100 Value:0xc0116fb9b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948631602s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.66.11:9100} value=32.831250000000026 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.66.11:9100} value=32.831250000000026 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.66.11:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.67.119:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.67.119:9100 Value:0xc0116fba70} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.67.119:9100 Value:0xc0116fba10} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.67.119:9100 Value:0xc0116fba48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948649405s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.67.119:9100} value=0.190624999910753 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.67.119:9100} value=0.190624999910753 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.67.119:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.67.242:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.67.242:9100 Value:0xc0116fbac0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.67.242:9100 Value:0xc0116fbae8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.67.242:9100 Value:0xc0116fbb10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948668574s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.67.242:9100} value=22.191666666694786 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.67.242:9100} value=22.191666666694786 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.67.242:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.67.253:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.67.253:9100 Value:0xc0116fbc20} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.67.253:9100 Value:0xc0116fbc48} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.67.253:9100 Value:0xc0116fbc80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948686164s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.67.253:9100} value=3.383333333333667 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.67.253:9100} value=3.383333333333667 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.67.253:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.67.68:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.67.68:9100 Value:0xc0116fbd28} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.67.68:9100 Value:0xc0116fbe70} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.67.68:9100 Value:0xc0116fbd00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948696315s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.67.68:9100} value=28.914583333195566 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.67.68:9100} value=28.914583333195566 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.67.68:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.67.80:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.67.80:9100 Value:0xc0116fbf90} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.67.80:9100 Value:0xc0116fbfb8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.67.80:9100 Value:0xc0776260e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948709094s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.67.80:9100} value=39.235416666645804 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.67.80:9100} value=39.235416666645804 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.67.80:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.67.84:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.67.84:9100 Value:0xc077626200} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.67.84:9100 Value:0xc077626180} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.67.84:9100 Value:0xc0776261b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948724009s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.67.84:9100} value=30.049999999999955 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.67.84:9100} value=30.049999999999955 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.67.84:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.68.137:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.68.137:9100 Value:0xc077626260} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.68.137:9100 Value:0xc077626288} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.68.137:9100 Value:0xc0776262b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948737748s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.68.137:9100} value=24.141666666668016 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.68.137:9100} value=24.141666666668016 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.68.137:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.69.245:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.69.245:9100 Value:0xc077626310} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.69.245:9100 Value:0xc077626338} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.69.245:9100 Value:0xc0776263c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948747195s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.69.245:9100} value=21.95625000000291 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.69.245:9100} value=21.95625000000291 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.69.245:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.70.165:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.70.165:9100 Value:0xc077626460} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.70.165:9100 Value:0xc077626478} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.70.165:9100 Value:0xc0776264c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.94876245s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.70.165:9100} value=31.99166666666619 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.70.165:9100} value=31.99166666666619 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.70.165:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.70.80:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.70.80:9100 Value:0xc077626530} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.70.80:9100 Value:0xc077626558} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.70.80:9100 Value:0xc0776265b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948778037s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.70.80:9100} value=18.273876966142353 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.70.80:9100} value=18.273876966142353 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.70.80:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.71.18:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.71.18:9100 Value:0xc077626708} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.71.18:9100 Value:0xc0776267c0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.71.18:9100 Value:0xc0776266e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948788701s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.71.18:9100} value=27.610416666666808 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.71.18:9100} value=27.610416666666808 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.71.18:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.71.245:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.71.245:9100 Value:0xc077626840} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.71.245:9100 Value:0xc077626868} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.71.245:9100 Value:0xc077626890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948806061s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.71.245:9100} value=13.087499999998812 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.71.245:9100} value=13.087499999998812 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.71.245:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.72.183:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.72.183:9100 Value:0xc077626958} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.72.183:9100 Value:0xc077626980} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.72.183:9100 Value:0xc077626930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948820552s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.72.183:9100} value=16.279166666718098 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.72.183:9100} value=16.279166666718098 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.72.183:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.72.194:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.72.194:9100 Value:0xc0776269f8} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.72.194:9100 Value:0xc077626ac0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.72.194:9100 Value:0xc0776269d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948853775s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.72.194:9100} value=23.662500000000364 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.72.194:9100} value=23.662500000000364 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.72.194:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.72.199:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.72.199:9100 Value:0xc077626b50} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.72.199:9100 Value:0xc077626b78} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.72.199:9100 Value:0xc077626bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948881443s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.72.199:9100} value=0.5249999999978172 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.72.199:9100} value=0.5249999999978172 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.72.199:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.73.146:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.73.146:9100 Value:0xc077626c50} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.73.146:9100 Value:0xc077626c78} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.73.146:9100 Value:0xc077626ca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948893328s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.73.146:9100} value=21.547916666750098 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.73.146:9100} value=21.547916666750098 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.73.146:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.73.208:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.73.208:9100 Value:0xc077626d30} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.73.208:9100 Value:0xc077626d58} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.73.208:9100 Value:0xc077627060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948907609s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.73.208:9100} value=15.174999999993204 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.73.208:9100} value=15.174999999993204 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.73.208:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.74.224:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.74.224:9100 Value:0xc077627110} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.74.224:9100 Value:0xc0776270b0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.74.224:9100 Value:0xc0776270d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948921022s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.74.224:9100} value=31.24791666667079 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.74.224:9100} value=31.24791666667079 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.74.224:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.76.133:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.76.133:9100 Value:0xc0776271b0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.76.133:9100 Value:0xc077627160} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.76.133:9100 Value:0xc077627188}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948935023s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.76.133:9100} value=30.172916666666637 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.76.133:9100} value=30.172916666666637 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.76.133:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.76.148:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.76.148:9100 Value:0xc077627200} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.76.148:9100 Value:0xc077627218} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.76.148:9100 Value:0xc077627250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948947495s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.76.148:9100} value=35.083333333329094 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.76.148:9100} value=35.083333333329094 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.76.148:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.76.150:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.76.150:9100 Value:0xc077627460} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.76.150:9100 Value:0xc0776272a0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.76.150:9100 Value:0xc0776272e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948963578s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.76.150:9100} value=38.450000000048014 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.76.150:9100} value=38.450000000048014 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.76.150:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.77.180:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.77.180:9100 Value:0xc0776274b0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.77.180:9100 Value:0xc0776274d8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.77.180:9100 Value:0xc077627510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948978517s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.77.180:9100} value=39.75416666648621 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.77.180:9100} value=39.75416666648621 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.77.180:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.77.228:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.77.228:9100 Value:0xc077627588} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.77.228:9100 Value:0xc0776275b0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.77.228:9100 Value:0xc077627560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948993738s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.77.228:9100} value=3.2405792044642254 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.77.228:9100} value=3.2405792044642254 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.77.228:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.77.245:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.77.245:9100 Value:0xc077627600} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.77.245:9100 Value:0xc077627628} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.77.245:9100 Value:0xc077627650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949003835s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.77.245:9100} value=19.660416666667203 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.77.245:9100} value=19.660416666667203 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.77.245:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.77.65:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.77.65:9100 Value:0xc0776277f0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.77.65:9100 Value:0xc077627780} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.77.65:9100 Value:0xc0776277a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949037336s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.77.65:9100} value=5.208333333333343 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.77.65:9100} value=5.208333333333343 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.77.65:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.77.76:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.77.76:9100 Value:0xc077627890} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.77.76:9100 Value:0xc0776278b8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.77.76:9100 Value:0xc077627900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949052228s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.77.76:9100} value=26.870833333334005 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.77.76:9100} value=26.870833333334005 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.77.76:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.78.98:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.78.98:9100 Value:0xc077627970} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.78.98:9100 Value:0xc077627998} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.78.98:9100 Value:0xc077627a90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.94906545s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.78.98:9100} value=19.924999999978652 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.78.98:9100} value=19.924999999978652 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.78.98:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.79.114:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.79.114:9100 Value:0xc077627ae0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.79.114:9100 Value:0xc077627b18} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.79.114:9100 Value:0xc077627b40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.94907554s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.79.114:9100} value=30.585416666667314 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.79.114:9100} value=30.585416666667314 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.79.114:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.79.79:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.79.79:9100 Value:0xc077627bc0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.79.79:9100 Value:0xc077627be8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.79.79:9100 Value:0xc077627c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949090121s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.79.79:9100} value=33.854166666667425 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.79.79:9100} value=33.854166666667425 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.79.79:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.80.13:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.80.13:9100 Value:0xc077627db0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.80.13:9100 Value:0xc077627ca0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.80.13:9100 Value:0xc077627ce8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949105625s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.80.13:9100} value=24.414583333333823 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.80.13:9100} value=24.414583333333823 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.80.13:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.80.28:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.80.28:9100 Value:0xc077627e58} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.80.28:9100 Value:0xc077627ea0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.80.28:9100 Value:0xc077627e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949118915s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.80.28:9100} value=26.195833333329873 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.80.28:9100} value=26.195833333329873 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.80.28:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.81.216:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.81.216:9100 Value:0xc077627ef0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.81.216:9100 Value:0xc077627f18} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.81.216:9100 Value:0xc077627f50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949128898s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.81.216:9100} value=31.37291666666708 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.81.216:9100} value=31.37291666666708 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.81.216:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.81.71:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.81.71:9100 Value:0xc0d7a16060} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.81.71:9100 Value:0xc0d7a16098} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.81.71:9100 Value:0xc0d7a160e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949144428s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.81.71:9100} value=45.825000000002845 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.81.71:9100} value=45.825000000002845 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.81.71:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.82.15:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.82.15:9100 Value:0xc0d7a16160} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.82.15:9100 Value:0xc0d7a16198} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.82.15:9100 Value:0xc0d7a16210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949157115s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.82.15:9100} value=7.1437499998137355 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.82.15:9100} value=7.1437499998137355 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.82.15:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.82.212:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.82.212:9100 Value:0xc0d7a162d0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.82.212:9100 Value:0xc0d7a16270} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.82.212:9100 Value:0xc0d7a16298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.94917222s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.82.212:9100} value=16.777083333327013 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.82.212:9100} value=16.777083333327013 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.82.212:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.82.21:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.82.21:9100 Value:0xc0d7a16380} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.82.21:9100 Value:0xc0d7a163b8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.82.21:9100 Value:0xc0d7a16480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949182887s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.82.21:9100} value=16.681250000062093 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.82.21:9100} value=16.681250000062093 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.82.21:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.83.180:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.83.180:9100 Value:0xc0d7a16560} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.83.180:9100 Value:0xc0d7a16510} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.83.180:9100 Value:0xc0d7a16538}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949197195s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.83.180:9100} value=18.010416666667197 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.83.180:9100} value=18.010416666667197 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.83.180:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.83.18:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.83.18:9100 Value:0xc0d7a165e0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.83.18:9100 Value:0xc0d7a16608} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.83.18:9100 Value:0xc0d7a16660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949211647s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.83.18:9100} value=20.885416666569654 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.83.18:9100} value=20.885416666569654 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.83.18:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.83.38:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.83.38:9100 Value:0xc0d7a166d0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.83.38:9100 Value:0xc0d7a166f8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.83.38:9100 Value:0xc0d7a16760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949229916s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.83.38:9100} value=29.456250000000637 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.83.38:9100} value=29.456250000000637 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.83.38:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.83.7:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.83.7:9100 Value:0xc0d7a167d0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.83.7:9100 Value:0xc0d7a167f8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.83.7:9100 Value:0xc0d7a16850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.94924451s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.83.7:9100} value=40.44166666665356 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.83.7:9100} value=40.44166666665356 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.83.7:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.84.96:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.84.96:9100 Value:0xc0d7a16940} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.84.96:9100 Value:0xc0d7a168d0} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.84.96:9100 Value:0xc0d7a168f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949260428s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.84.96:9100} value=26.39999999999911 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.84.96:9100} value=26.39999999999911 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.84.96:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.85.193:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.85.193:9100 Value:0xc0d7a169a0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.85.193:9100 Value:0xc0d7a169c8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.85.193:9100 Value:0xc0d7a169f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949270551s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.85.193:9100} value=24.552083333352726 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.85.193:9100} value=24.552083333352726 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.85.193:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.85.29:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.85.29:9100 Value:0xc0d7a16a70} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.85.29:9100 Value:0xc0d7a16a98} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.85.29:9100 Value:0xc0d7a16af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949283401s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.85.29:9100} value=20.477083333333894 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.85.29:9100} value=20.477083333333894 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.85.29:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.85.98:9100 State:Normal Error:
Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.85.98:9100 Value:0xc0d7a16ba0} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.85.98:9100 Value:0xc0d7a16bc8} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.85.98:9100 Value:0xc0d7a16c20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949298316s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.85.98:9100} value=15.608333333333732 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.85.98:9100} value=15.608333333333732 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.85.98:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.86.78:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.86.78:9100 Value:0xc0d7a16d10} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.86.78:9100 Value:0xc0d7a16c90} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.86.78:9100 Value:0xc0d7a16cb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949309633s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.86.78:9100} value=26.372916666665574 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.86.78:9100} value=26.372916666665574 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.86.78:9100} value=0 ]} {Instance:cluster=prd-use1-prd-eks, instance=10.21.87.119:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=prd-use1-prd-eks, instance=10.21.87.119:9100 Value:0xc0d7a16d70} C:{Var:C Labels:cluster=prd-use1-prd-eks, instance=10.21.87.119:9100 Value:0xc0d7a16d98} D:{Var:D Labels:cluster=prd-use1-prd-eks, instance=10.21.87.119:9100 Value:0xc0d7a16dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949323331s EvaluationString:[ var='A' labels={cluster=prd-use1-prd-eks, instance=10.21.87.119:9100} value=42.82708333335905 ], [ var='C' labels={cluster=prd-use1-prd-eks, instance=10.21.87.119:9100} value=42.82708333335905 ], [ var='D' labels={cluster=prd-use1-prd-eks, instance=10.21.87.119:9100} value=0 ]}]" duration=39.521417ms + level=debug ts=2024-05-29T13:44:14.952062833Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-7" t=2024-05-29T13:44:14.951966804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-5" t=2024-05-29T13:44:14.951783716Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=87.249.132.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s434, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.951608934Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-3" t=2024-05-29T13:44:14.951575447Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, 
instance_name=candycrush2-w-2" t=2024-05-29T13:44:14.951470554Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.95143994Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-15" t=2024-05-29T13:44:14.951349648Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.951252871Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.951121401Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.95115658Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.950939902Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.951020184Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=52.986125ms + level=debug ts=2024-05-29T13:44:14.950859486Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.950978783Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.49.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.950884138Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.49.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.950874839Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-11" t=2024-05-29T13:44:14.950802658Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance="Instance=--" t=2024-05-29T13:44:14.950692741Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.49.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.950739224Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 
slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-10" t=2024-05-29T13:44:14.950684674Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.950642513Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-1" t=2024-05-29T13:44:14.950580712Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.49.31, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.950617711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-w-0" t=2024-05-29T13:44:14.950482476Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.950435028Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.950343717Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.49.121, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.950310185Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.49.121, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.950298197Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.950257843Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-m-1" t=2024-05-29T13:44:14.950202852Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush2-m-0" t=2024-05-29T13:44:14.950059374Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.950022897Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-w-9" t=2024-05-29T13:44:14.94997488Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.949777339Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:14.949884847Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:14.949843631Z level=debug msg="Deleting alert states" count=1 + level=debug ts=2024-05-29T13:44:14.949858647Z caller=remote_instance_store.go:57 user=608555 slug=ias msg="calling DeleteAlertInstances - not implemented" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.49.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s411, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.949692309Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=60199 slug=wallapop version=2 fingerprint=9b2e535cfb272796 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.949612366Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.949282386s EvaluationString:}]" duration=16.756152ms + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-w-6" t=2024-05-29T13:44:14.949629154Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-w-6" t=2024-05-29T13:44:14.949608429Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.949414239Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.949369189Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.949271671Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.949046053Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.94903012Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.48.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.94907893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.949022682Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=171235 slug=circleslabs t=2024-05-29T13:44:14.948994876Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.948923308Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=171235 slug=circleslabs version=66 fingerprint=2c010f2db07886a9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.948937891Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.948672052s EvaluationString:}]" duration=53.534811ms + level=debug ts=2024-05-29T13:44:14.948815158Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.48.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.948845374Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.48.61, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.948827841Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.48.211, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, 
path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.948598807Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-w-16" t=2024-05-29T13:44:14.948582857Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-w-15" t=2024-05-29T13:44:14.948381111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.948279814Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.948193336Z caller=remote_instance_store.go:51 user=314067 slug=itsme msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.948187748Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.948058569Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=327842 slug=exabeam t=2024-05-29T13:44:14.948066156Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-w-14" t=2024-05-29T13:44:14.948043944Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.947982198Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackers, command=update" t=2024-05-29T13:44:14.947828138Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackers, command=insert" t=2024-05-29T13:44:14.947688782Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackers, command=getMore" t=2024-05-29T13:44:14.947587092Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.94754529Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackers, command=getMore" t=2024-05-29T13:44:14.947570153Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.48.151, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.947427757Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackers, 
command=findAndModify" t=2024-05-29T13:44:14.947409534Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackers, command=find" t=2024-05-29T13:44:14.94730975Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackers, command=find" t=2024-05-29T13:44:14.9472938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.947236273Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.947221817Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.947155428Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.48.121, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s415, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.947212377Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.947135318Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.947095339Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.947088032Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.94708058Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.947129965Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.947032887Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.946995366Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.946933352Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.946917508Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.946906979Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.946969049Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.946791008Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackerEvents, command=killCursors" t=2024-05-29T13:44:14.946761196Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackerEvents, command=insert" t=2024-05-29T13:44:14.946644227Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:14.946774639Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.946670308Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.946632187Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.946620958Z caller=remote_instance_store.go:51 user=244426 slug=readmeio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=trackerEvents, command=find" t=2024-05-29T13:44:14.946516814Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=244426 slug=readmeio t=2024-05-29T13:44:14.946487686Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=155740 slug=routific version=1 fingerprint=3ad64cc535c17a6c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.946427124Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.94613064s EvaluationString:}]" duration=65.057809ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, 
city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.36.224, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s443, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.946423204Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-m-2" t=2024-05-29T13:44:14.946404421Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="device=sda1, instance_name=candycrush1-m-1" t=2024-05-29T13:44:14.946245877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt-2, country=Germany, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.36.187, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s431, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.946184135Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=tracker.beOutStatus, command=find" t=2024-05-29T13:44:14.946126633Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=344017 slug=descript t=2024-05-29T13:44:14.943284577Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:14.942994943Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-travel, collection=lifelineSchedule, command=findAndModify" t=2024-05-29T13:44:14.945830799Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.945798463Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.9457833Z caller=remote_instance_store.go:51 user=824501 slug=bendingspoons msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory t=2024-05-29T13:44:14.945524301Z level=debug msg="State manager processing evaluation results" resultCount=231 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.88.97.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s491, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.945740086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.945687795Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.945504154Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=637816 slug=kingobservatory version=76 fingerprint=56ed96eb6451214a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.939659261Z level=debug msg="Alert rule evaluated" results="[{Instance:device=sda, instance_name=common1-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda, instance_name=common1-w-2 Value:0xc10c684c60} C:{Var:C Labels:device=sda, instance_name=common1-w-2 Value:0xc10c684c90} D:{Var:D Labels:device=sda, instance_name=common1-w-2 Value:0xc10c684c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.929947357s EvaluationString:[ var='B' labels={device=sda, instance_name=common1-w-2} value=4.961183 ], [ var='C' labels={device=sda, instance_name=common1-w-2} value=0 ], [ var='D' labels={device=sda, instance_name=common1-w-2} value=4.9 ]} {Instance:device=sda1, instance_name=candycrush1-m-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-m-0 Value:0xc10c684d50} C:{Var:C Labels:device=sda1, instance_name=candycrush1-m-0 Value:0xc10c684dd8} D:{Var:D Labels:device=sda1, instance_name=candycrush1-m-0 Value:0xc10c684cd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.92996868s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-m-0} value=14.88049 ], [ var='C' labels={device=sda1, instance_name=candycrush1-m-0} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-m-0} value=14.8 ]} {Instance:device=sda1, instance_name=candycrush1-m-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-m-1 Value:0xc10c684e50} C:{Var:C Labels:device=sda1, instance_name=candycrush1-m-1 Value:0xc10c684e98} D:{Var:D Labels:device=sda1, instance_name=candycrush1-m-1 Value:0xc10c684ed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.929978837s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-m-1} value=11.045198 ], [ var='C' labels={device=sda1, instance_name=candycrush1-m-1} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-m-1} value=11 ]} {Instance:device=sda1, instance_name=candycrush1-m-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-m-2 Value:0xc10c684fb8} C:{Var:C Labels:device=sda1, instance_name=candycrush1-m-2 Value:0xc10c684f48} D:{Var:D Labels:device=sda1, instance_name=candycrush1-m-2 Value:0xc10c684f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.929988646s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-m-2} value=11.005215 ], [ var='C' labels={device=sda1, instance_name=candycrush1-m-2} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-m-2} value=11 ]} {Instance:device=sda1, instance_name=candycrush1-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-0 Value:0xc10c685048} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-0 Value:0xc10c6850b0} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-0 Value:0xc10c685030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.92999637s 
EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-0} value=10.583307 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-0} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-0} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-1 Value:0xc10c6851a8} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-1 Value:0xc10c6850f8} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-1 Value:0xc10c685170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930007978s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-1} value=10.558913 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-1} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-1} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-10 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-10 Value:0xc10c685230} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-10 Value:0xc10c685268} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-10 Value:0xc10c6852b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930015535s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-10} value=10.51278 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-10} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-10} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-11 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-11 Value:0xc10c685360} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-11 Value:0xc10c685378} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-11 Value:0xc10c685328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930023565s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-11} value=10.564224 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-11} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-11} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-12 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-12 Value:0xc10c685410} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-12 Value:0xc10c685458} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-12 Value:0xc10c6854a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930032545s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-12} value=10.553368 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-12} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-12} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-13 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-13 Value:0xc10c6854f8} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-13 Value:0xc10c685560} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-13 Value:0xc10c685578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93004294s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-13} value=10.561009 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-13} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-13} value=10.5 
]} {Instance:device=sda1, instance_name=candycrush1-w-14 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-14 Value:0xc10c685638} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-14 Value:0xc10c685690} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-14 Value:0xc10c685610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93005181s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-14} value=10.532853 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-14} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-14} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-15 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-15 Value:0xc10c685748} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-15 Value:0xc10c6857b0} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-15 Value:0xc10c6857c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930061s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-15} value=10.532494 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-15} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-15} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-16 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-16 Value:0xc10c685850} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-16 Value:0xc10c685868} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-16 Value:0xc10c6858d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930068511s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-16} value=10.535259 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-16} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-16} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-17 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-17 Value:0xc10c685928} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-17 Value:0xc10c685980} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-17 Value:0xc10c685998}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930078688s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-17} value=10.524817 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-17} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-17} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-2 Value:0xc10c685a30} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-2 Value:0xc10c685a48} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-2 Value:0xc10c685ab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930086506s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-2} value=10.545376 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-2} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-2} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-3 Value:0xc10c685b08} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-3 
Value:0xc10c685b70} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-3 Value:0xc10c685bb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930094572s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-3} value=10.548819 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-3} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-3} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-4 Value:0xc10c685c48} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-4 Value:0xc10c685cf0} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-4 Value:0xc10c685c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930102347s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-4} value=10.535641 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-4} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-4} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-5 Value:0xc10c685d68} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-5 Value:0xc10c685da0} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-5 Value:0xc10c685de8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930110064s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-5} value=10.555115 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-5} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-5} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-6 Value:0xc10c685e98} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-6 Value:0xc10c685f20} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-6 Value:0xc10c685e80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930228257s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-6} value=10.566333 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-6} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-6} value=10.5 ]} {Instance:device=sda1, instance_name=candycrush1-w-7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-7 Value:0xc0037786b0} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-7 Value:0xc003778ea8} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-7 Value:0xc10c685f88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930251458s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-7} value=11.072371 ], [ var='C' labels={device=sda1, instance_name=candycrush1-w-7} value=0 ], [ var='D' labels={device=sda1, instance_name=candycrush1-w-7} value=11 ]} {Instance:device=sda1, instance_name=candycrush1-w-8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda1, instance_name=candycrush1-w-8 Value:0xc003779490} C:{Var:C Labels:device=sda1, instance_name=candycrush1-w-8 Value:0xc0037794b8} D:{Var:D Labels:device=sda1, instance_name=candycrush1-w-8 Value:0xc003779510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.930261098s EvaluationString:[ var='B' labels={device=sda1, instance_name=candycrush1-w-8} value=10.550735 ], 
[Grafana ruler evaluation log dump, elided: per-instance alert evaluation results for devices sda1 and sda15 across the candycrush1, candycrush2, common1, common2, farmstritz1, qa, and stritzdev clusters. Every instance reports State:Normal with per-instance values for vars B, C, and D (for example, candycrush2-w-10 on sda1: B=10.378183, C=0, D=10.3; all sda15 instances: B=8.602321, C=0, D=8.6), EvaluatedAt:2024-05-29 13:44:10 +0000 UTC, EvaluationDuration ~4.93s.]
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931298633s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-m-2} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-m-2} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-m-2} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-0 Value:0xc0d662d2b0} C:{Var:C Labels:device=sda15, instance_name=common2-w-0 Value:0xc0d662d2e0} D:{Var:D Labels:device=sda15, instance_name=common2-w-0 Value:0xc0d662d268}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931306904s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-0} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-0} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-0} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-1 Value:0xc0d662d340} C:{Var:C Labels:device=sda15, instance_name=common2-w-1 Value:0xc0d662d370} D:{Var:D Labels:device=sda15, instance_name=common2-w-1 Value:0xc0d662d3c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931315468s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-1} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-1} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-1} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-2 Value:0xc0d662d420} C:{Var:C Labels:device=sda15, instance_name=common2-w-2 Value:0xc0d662d460} D:{Var:D Labels:device=sda15, instance_name=common2-w-2 Value:0xc0d662d490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931324084s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-2} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-2} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-2} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-3 Value:0xc0d662d648} C:{Var:C Labels:device=sda15, instance_name=common2-w-3 Value:0xc0d662d690} D:{Var:D Labels:device=sda15, instance_name=common2-w-3 Value:0xc0d662d500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931332366s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-3} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-3} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-3} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-4 Value:0xc0d662d738} C:{Var:C Labels:device=sda15, instance_name=common2-w-4 Value:0xc0d662d770} D:{Var:D Labels:device=sda15, instance_name=common2-w-4 Value:0xc0d662d6f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931341204s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-4} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-4} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-4} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-5 State:Normal Error: 
Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-5 Value:0xc0d662d7e0} C:{Var:C Labels:device=sda15, instance_name=common2-w-5 Value:0xc0d662d818} D:{Var:D Labels:device=sda15, instance_name=common2-w-5 Value:0xc0d662d850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931349116s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-5} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-5} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-5} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-6 Value:0xc0d662d928} C:{Var:C Labels:device=sda15, instance_name=common2-w-6 Value:0xc0d662d8c0} D:{Var:D Labels:device=sda15, instance_name=common2-w-6 Value:0xc0d662d8f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931357676s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-6} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-6} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-6} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-7 Value:0xc0d662d9a0} C:{Var:C Labels:device=sda15, instance_name=common2-w-7 Value:0xc0d662d9f0} D:{Var:D Labels:device=sda15, instance_name=common2-w-7 Value:0xc0d662da40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931365274s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-7} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-7} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-7} value=8.6 ]} {Instance:device=sda15, instance_name=common2-w-8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=common2-w-8 Value:0xc0d662db40} C:{Var:C Labels:device=sda15, instance_name=common2-w-8 Value:0xc0d662dad8} D:{Var:D Labels:device=sda15, instance_name=common2-w-8 Value:0xc0d662db10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931375287s EvaluationString:[ var='B' labels={device=sda15, instance_name=common2-w-8} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=common2-w-8} value=0 ], [ var='D' labels={device=sda15, instance_name=common2-w-8} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-m-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-m-0 Value:0xc0d662dbd0} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-m-0 Value:0xc0d662dbe8} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-m-0 Value:0xc0d662dc50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931383608s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-m-0} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-m-0} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-m-0} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-m-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-m-1 Value:0xc0d662dd08} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-m-1 Value:0xc0d662dd40} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-m-1 Value:0xc0d662dcc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931392036s 
EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-m-1} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-m-1} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-m-1} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-m-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-m-2 Value:0xc0d662ddd8} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-m-2 Value:0xc0d662de70} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-m-2 Value:0xc0d662ddc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931400902s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-m-2} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-m-2} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-m-2} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-0 Value:0xc0d662df28} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-0 Value:0xc0d662dfc0} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-0 Value:0xc0d662def0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931408699s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-0} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-0} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-0} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-1 Value:0xc03872d530} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-1 Value:0xc03872d5a8} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-1 Value:0xc03872d6d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931416884s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-1} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-1} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-1} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-2 Value:0xc03872d850} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-2 Value:0xc03872d7c0} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-2 Value:0xc03872d7d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931425148s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-2} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-2} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-2} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-3 Value:0xc03872d8f8} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-3 Value:0xc03872d930} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-3 Value:0xc03872d8c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931434241s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-3} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-3} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-3} value=8.6 
]} {Instance:device=sda15, instance_name=farmstritz1-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-4 Value:0xc03872d9a0} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-4 Value:0xc03872d9d8} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-4 Value:0xc03872de70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931444332s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-4} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-4} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-4} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-5 Value:0xc03872dee0} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-5 Value:0xc03872def8} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-5 Value:0xc03872df50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931453071s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-5} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-5} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-5} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-6 Value:0xc03872dfc0} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-6 Value:0xc03872dfd8} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-6 Value:0xc02f7ac030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93146123s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-6} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-6} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-6} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-7 Value:0xc02f7ac0a0} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-7 Value:0xc02f7ac0b8} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-7 Value:0xc02f7ac120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931469488s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-7} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-7} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-7} value=8.6 ]} {Instance:device=sda15, instance_name=farmstritz1-w-8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=farmstritz1-w-8 Value:0xc02f7ac1c8} C:{Var:C Labels:device=sda15, instance_name=farmstritz1-w-8 Value:0xc02f7ac200} D:{Var:D Labels:device=sda15, instance_name=farmstritz1-w-8 Value:0xc02f7ac190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931477074s EvaluationString:[ var='B' labels={device=sda15, instance_name=farmstritz1-w-8} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=farmstritz1-w-8} value=0 ], [ var='D' labels={device=sda15, instance_name=farmstritz1-w-8} value=8.6 ]} {Instance:device=sda15, instance_name=qa-m-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=qa-m-0 Value:0xc02f7ac260} C:{Var:C Labels:device=sda15, instance_name=qa-m-0 Value:0xc02f7ac290} D:{Var:D 
Labels:device=sda15, instance_name=qa-m-0 Value:0xc02f7ac2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93148527s EvaluationString:[ var='B' labels={device=sda15, instance_name=qa-m-0} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=qa-m-0} value=0 ], [ var='D' labels={device=sda15, instance_name=qa-m-0} value=8.6 ]} {Instance:device=sda15, instance_name=qa-m-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=qa-m-1 Value:0xc02f7ac380} C:{Var:C Labels:device=sda15, instance_name=qa-m-1 Value:0xc02f7ac320} D:{Var:D Labels:device=sda15, instance_name=qa-m-1 Value:0xc02f7ac350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931494448s EvaluationString:[ var='B' labels={device=sda15, instance_name=qa-m-1} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=qa-m-1} value=0 ], [ var='D' labels={device=sda15, instance_name=qa-m-1} value=8.6 ]} {Instance:device=sda15, instance_name=qa-m-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=qa-m-2 Value:0xc02f7ac3e0} C:{Var:C Labels:device=sda15, instance_name=qa-m-2 Value:0xc02f7ac410} D:{Var:D Labels:device=sda15, instance_name=qa-m-2 Value:0xc02f7ac440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931502836s EvaluationString:[ var='B' labels={device=sda15, instance_name=qa-m-2} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=qa-m-2} value=0 ], [ var='D' labels={device=sda15, instance_name=qa-m-2} value=8.6 ]} {Instance:device=sda15, instance_name=qa-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=qa-w-0 Value:0xc02f7ac500} C:{Var:C Labels:device=sda15, instance_name=qa-w-0 Value:0xc02f7ac4a0} D:{Var:D Labels:device=sda15, instance_name=qa-w-0 Value:0xc02f7ac4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931511095s EvaluationString:[ var='B' labels={device=sda15, instance_name=qa-w-0} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=qa-w-0} value=0 ], [ var='D' labels={device=sda15, instance_name=qa-w-0} value=8.6 ]} {Instance:device=sda15, instance_name=qa-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=qa-w-1 Value:0xc02f7ac590} C:{Var:C Labels:device=sda15, instance_name=qa-w-1 Value:0xc02f7ac5c0} D:{Var:D Labels:device=sda15, instance_name=qa-w-1 Value:0xc02f7ac560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931519731s EvaluationString:[ var='B' labels={device=sda15, instance_name=qa-w-1} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=qa-w-1} value=0 ], [ var='D' labels={device=sda15, instance_name=qa-w-1} value=8.6 ]} {Instance:device=sda15, instance_name=stritzdev-m-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=stritzdev-m-0 Value:0xc02f7ac630} C:{Var:C Labels:device=sda15, instance_name=stritzdev-m-0 Value:0xc02f7ac648} D:{Var:D Labels:device=sda15, instance_name=stritzdev-m-0 Value:0xc02f7ac6a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931527839s EvaluationString:[ var='B' labels={device=sda15, instance_name=stritzdev-m-0} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=stritzdev-m-0} value=0 ], [ var='D' labels={device=sda15, instance_name=stritzdev-m-0} value=8.6 ]} {Instance:device=sda15, instance_name=stritzdev-m-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, 
instance_name=stritzdev-m-1 Value:0xc02f7ac710} C:{Var:C Labels:device=sda15, instance_name=stritzdev-m-1 Value:0xc02f7ac728} D:{Var:D Labels:device=sda15, instance_name=stritzdev-m-1 Value:0xc02f7ac780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931537979s EvaluationString:[ var='B' labels={device=sda15, instance_name=stritzdev-m-1} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=stritzdev-m-1} value=0 ], [ var='D' labels={device=sda15, instance_name=stritzdev-m-1} value=8.6 ]} {Instance:device=sda15, instance_name=stritzdev-m-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=stritzdev-m-2 Value:0xc02f7ac808} C:{Var:C Labels:device=sda15, instance_name=stritzdev-m-2 Value:0xc02f7ac860} D:{Var:D Labels:device=sda15, instance_name=stritzdev-m-2 Value:0xc02f7ac7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931547779s EvaluationString:[ var='B' labels={device=sda15, instance_name=stritzdev-m-2} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=stritzdev-m-2} value=0 ], [ var='D' labels={device=sda15, instance_name=stritzdev-m-2} value=8.6 ]} {Instance:device=sda15, instance_name=stritzdev-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=stritzdev-w-0 Value:0xc02f7ac8e8} C:{Var:C Labels:device=sda15, instance_name=stritzdev-w-0 Value:0xc02f7ac940} D:{Var:D Labels:device=sda15, instance_name=stritzdev-w-0 Value:0xc02f7ac8d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931556196s EvaluationString:[ var='B' labels={device=sda15, instance_name=stritzdev-w-0} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=stritzdev-w-0} value=0 ], [ var='D' labels={device=sda15, instance_name=stritzdev-w-0} value=8.6 ]} {Instance:device=sda15, instance_name=stritzdev-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sda15, instance_name=stritzdev-w-1 Value:0xc02f7ac9b0} C:{Var:C Labels:device=sda15, instance_name=stritzdev-w-1 Value:0xc02f7ac9c8} D:{Var:D Labels:device=sda15, instance_name=stritzdev-w-1 Value:0xc02f7aca20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931565186s EvaluationString:[ var='B' labels={device=sda15, instance_name=stritzdev-w-1} value=8.602321 ], [ var='C' labels={device=sda15, instance_name=stritzdev-w-1} value=0 ], [ var='D' labels={device=sda15, instance_name=stritzdev-w-1} value=8.6 ]} {Instance:device=sdb, instance_name=candycrush1-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-0 Value:0xc02f7acab0} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-0 Value:0xc02f7acae0} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-0 Value:0xc02f7aca80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931574806s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-0} value=37.778946 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-0} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-0} value=37.7 ]} {Instance:device=sdb, instance_name=candycrush1-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-1 Value:0xc02f7acb40} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-1 Value:0xc02f7acb70} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-1 Value:0xc02f7acba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931582997s EvaluationString:[ var='B' 
labels={device=sdb, instance_name=candycrush1-w-1} value=36.966183 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-1} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-1} value=36.9 ]} {Instance:device=sdb, instance_name=candycrush1-w-10 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-10 Value:0xc02f7acc60} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-10 Value:0xc02f7acc00} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-10 Value:0xc02f7acc30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931591599s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-10} value=37.150139 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-10} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-10} value=37.1 ]} {Instance:device=sdb, instance_name=candycrush1-w-11 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-11 Value:0xc02f7accc0} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-11 Value:0xc02f7accf8} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-11 Value:0xc02f7acd30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931600094s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-11} value=37.653076 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-11} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-11} value=37.6 ]} {Instance:device=sdb, instance_name=candycrush1-w-12 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-12 Value:0xc02f7acd90} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-12 Value:0xc02f7acdc0} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-12 Value:0xc02f7acdf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931618712s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-12} value=37.172119 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-12} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-12} value=37.1 ]} {Instance:device=sdb, instance_name=candycrush1-w-13 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-13 Value:0xc02f7ace50} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-13 Value:0xc02f7ace80} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-13 Value:0xc02f7aceb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93162791s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-13} value=37.652596 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-13} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-13} value=37.6 ]} {Instance:device=sdb, instance_name=candycrush1-w-14 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-14 Value:0xc02f7acf98} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-14 Value:0xc02f7acf20} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-14 Value:0xc02f7acf50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931635479s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-14} value=37.375942 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-14} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-14} value=37.3 ]} {Instance:device=sdb, instance_name=candycrush1-w-15 
State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-15 Value:0xc02f7ad060} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-15 Value:0xc02f7ad000} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-15 Value:0xc02f7ad030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931643947s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-15} value=37.355961 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-15} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-15} value=37.3 ]} {Instance:device=sdb, instance_name=candycrush1-w-16 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-16 Value:0xc02f7ad0c0} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-16 Value:0xc02f7ad0f0} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-16 Value:0xc02f7ad120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931654454s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-16} value=36.965137 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-16} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-16} value=36.9 ]} {Instance:device=sdb, instance_name=candycrush1-w-17 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-17 Value:0xc02f7ad180} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-17 Value:0xc02f7ad1b0} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-17 Value:0xc02f7ad1e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93166315s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-17} value=38.127213 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-17} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-17} value=38.1 ]} {Instance:device=sdb, instance_name=candycrush1-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-2 Value:0xc02f7ad280} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-2 Value:0xc02f7ad2b0} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-2 Value:0xc02f7ad250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931672006s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-2} value=37.835594 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-2} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-2} value=37.8 ]} {Instance:device=sdb, instance_name=candycrush1-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-3 Value:0xc02f7ad348} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-3 Value:0xc02f7ad380} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-3 Value:0xc02f7ad310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931680136s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-3} value=37.298412 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-3} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-3} value=37.2 ]} {Instance:device=sdb, instance_name=candycrush1-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-4 Value:0xc02f7ad3e0} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-4 Value:0xc02f7ad410} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-4 Value:0xc02f7ad440}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931688994s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-4} value=37.341843 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-4} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-4} value=37.3 ]} {Instance:device=sdb, instance_name=candycrush1-w-5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-5 Value:0xc02f7ad500} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-5 Value:0xc02f7ad4a0} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-5 Value:0xc02f7ad4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931697375s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-5} value=37.575699 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-5} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-5} value=37.5 ]} {Instance:device=sdb, instance_name=candycrush1-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-6 Value:0xc02f7ad560} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-6 Value:0xc02f7ad590} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-6 Value:0xc02f7ad5d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931705855s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-6} value=37.101273 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-6} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-6} value=37.1 ]} {Instance:device=sdb, instance_name=candycrush1-w-7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-7 Value:0xc02f7ad640} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-7 Value:0xc02f7ad670} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-7 Value:0xc02f7ad6a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931713954s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-7} value=37.493599 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-7} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-7} value=37.4 ]} {Instance:device=sdb, instance_name=candycrush1-w-8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-8 Value:0xc02f7ad710} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-8 Value:0xc02f7ad740} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-8 Value:0xc02f7ad770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931721531s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-8} value=37.327721 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-8} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-8} value=37.3 ]} {Instance:device=sdb, instance_name=candycrush1-w-9 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush1-w-9 Value:0xc02f7ad7d0} C:{Var:C Labels:device=sdb, instance_name=candycrush1-w-9 Value:0xc02f7ad808} D:{Var:D Labels:device=sdb, instance_name=candycrush1-w-9 Value:0xc02f7ad840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931730789s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush1-w-9} value=37.611671 ], [ var='C' labels={device=sdb, instance_name=candycrush1-w-9} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush1-w-9} 
value=37.6 ]} {Instance:device=sdb, instance_name=candycrush2-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-0 Value:0xc02f7ad8a0} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-0 Value:0xc02f7ad8d8} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-0 Value:0xc02f7ad910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931739015s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-0} value=47.674675 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-0} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-0} value=47.6 ]} {Instance:device=sdb, instance_name=candycrush2-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-1 Value:0xc02f7ad970} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-1 Value:0xc02f7ad9a0} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-1 Value:0xc02f7ad9d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931746966s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-1} value=48.258518 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-1} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-1} value=48.2 ]} {Instance:device=sdb, instance_name=candycrush2-w-10 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-10 Value:0xc02f7ada30} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-10 Value:0xc02f7ada60} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-10 Value:0xc02f7ada98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931754708s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-10} value=46.875443 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-10} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-10} value=46.8 ]} {Instance:device=sdb, instance_name=candycrush2-w-11 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-11 Value:0xc02f7adb00} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-11 Value:0xc02f7adb30} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-11 Value:0xc02f7adb60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931763481s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-11} value=47.925945 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-11} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-11} value=47.9 ]} {Instance:device=sdb, instance_name=candycrush2-w-12 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-12 Value:0xc02f7adbc0} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-12 Value:0xc02f7adbf8} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-12 Value:0xc02f7adc60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931771497s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-12} value=46.011024 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-12} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-12} value=46 ]} {Instance:device=sdb, instance_name=candycrush2-w-13 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-13 Value:0xc02f7adcc0} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-13 Value:0xc02f7adcf0} D:{Var:D 
Labels:device=sdb, instance_name=candycrush2-w-13 Value:0xc02f7add28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931780121s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-13} value=44.490574 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-13} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-13} value=44.4 ]} {Instance:device=sdb, instance_name=candycrush2-w-14 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-14 Value:0xc02f7add90} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-14 Value:0xc02f7addc0} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-14 Value:0xc02f7addf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931788644s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-14} value=46.64922 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-14} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-14} value=46.6 ]} {Instance:device=sdb, instance_name=candycrush2-w-15 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-15 Value:0xc02f7ade88} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-15 Value:0xc02f7aded0} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-15 Value:0xc02f7ade50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931798065s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-15} value=46.017094 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-15} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-15} value=46 ]} {Instance:device=sdb, instance_name=candycrush2-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-2 Value:0xc02f7adf70} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-2 Value:0xc02f7adfa8} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-2 Value:0xc02f7adf38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931808472s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-2} value=46.758076 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-2} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-2} value=46.7 ]} {Instance:device=sdb, instance_name=candycrush2-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-3 Value:0xc082204090} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-3 Value:0xc082204010} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-3 Value:0xc082204058}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931816282s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-3} value=0.375682 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-3} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-3} value=0.3 ]} {Instance:device=sdb, instance_name=candycrush2-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-4 Value:0xc082204110} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-4 Value:0xc082204140} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-4 Value:0xc082204180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931824826s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-4} value=45.279877 ], [ var='C' labels={device=sdb, 
instance_name=candycrush2-w-4} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-4} value=45.2 ]} {Instance:device=sdb, instance_name=candycrush2-w-5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-5 Value:0xc082204298} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-5 Value:0xc082204210} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-5 Value:0xc082204240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931834231s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-5} value=44.1982 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-5} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-5} value=44.1 ]} {Instance:device=sdb, instance_name=candycrush2-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-6 Value:0xc082204320} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-6 Value:0xc082204380} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-6 Value:0xc0822043c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931843051s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-6} value=45.753231 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-6} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-6} value=45.7 ]} {Instance:device=sdb, instance_name=candycrush2-w-7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-7 Value:0xc0822044a0} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-7 Value:0xc082204440} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-7 Value:0xc082204470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931851206s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-7} value=45.64175 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-7} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-7} value=45.6 ]} {Instance:device=sdb, instance_name=candycrush2-w-8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-8 Value:0xc082204510} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-8 Value:0xc082204548} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-8 Value:0xc082204590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931860944s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-8} value=45.772606 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-8} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-8} value=45.7 ]} {Instance:device=sdb, instance_name=candycrush2-w-9 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=candycrush2-w-9 Value:0xc082204600} C:{Var:C Labels:device=sdb, instance_name=candycrush2-w-9 Value:0xc082204630} D:{Var:D Labels:device=sdb, instance_name=candycrush2-w-9 Value:0xc082204660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931869318s EvaluationString:[ var='B' labels={device=sdb, instance_name=candycrush2-w-9} value=46.669788 ], [ var='C' labels={device=sdb, instance_name=candycrush2-w-9} value=0 ], [ var='D' labels={device=sdb, instance_name=candycrush2-w-9} value=46.6 ]} {Instance:device=sdb, instance_name=common1-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common1-w-0 Value:0xc082204728} C:{Var:C 
Labels:device=sdb, instance_name=common1-w-0 Value:0xc0822046c0} D:{Var:D Labels:device=sdb, instance_name=common1-w-0 Value:0xc0822046f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931876791s EvaluationString:[ var='B' labels={device=sdb, instance_name=common1-w-0} value=22.055262 ], [ var='C' labels={device=sdb, instance_name=common1-w-0} value=0 ], [ var='D' labels={device=sdb, instance_name=common1-w-0} value=22 ]} {Instance:device=sdb, instance_name=common1-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common1-w-1 Value:0xc082204810} C:{Var:C Labels:device=sdb, instance_name=common1-w-1 Value:0xc0822047a0} D:{Var:D Labels:device=sdb, instance_name=common1-w-1 Value:0xc0822047d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931884771s EvaluationString:[ var='B' labels={device=sdb, instance_name=common1-w-1} value=19.127148 ], [ var='C' labels={device=sdb, instance_name=common1-w-1} value=0 ], [ var='D' labels={device=sdb, instance_name=common1-w-1} value=19.1 ]} {Instance:device=sdb, instance_name=common1-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common1-w-3 Value:0xc082204870} C:{Var:C Labels:device=sdb, instance_name=common1-w-3 Value:0xc0822048b8} D:{Var:D Labels:device=sdb, instance_name=common1-w-3 Value:0xc0822048f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931893501s EvaluationString:[ var='B' labels={device=sdb, instance_name=common1-w-3} value=9.491591 ], [ var='C' labels={device=sdb, instance_name=common1-w-3} value=0 ], [ var='D' labels={device=sdb, instance_name=common1-w-3} value=9.4 ]} {Instance:device=sdb, instance_name=common1-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common1-w-4 Value:0xc082204960} C:{Var:C Labels:device=sdb, instance_name=common1-w-4 Value:0xc082204990} D:{Var:D Labels:device=sdb, instance_name=common1-w-4 Value:0xc0822049c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931902269s EvaluationString:[ var='B' labels={device=sdb, instance_name=common1-w-4} value=12.589849 ], [ var='C' labels={device=sdb, instance_name=common1-w-4} value=0 ], [ var='D' labels={device=sdb, instance_name=common1-w-4} value=12.5 ]} {Instance:device=sdb, instance_name=common1-w-5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common1-w-5 Value:0xc082204ad0} C:{Var:C Labels:device=sdb, instance_name=common1-w-5 Value:0xc082204a40} D:{Var:D Labels:device=sdb, instance_name=common1-w-5 Value:0xc082204a70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931910432s EvaluationString:[ var='B' labels={device=sdb, instance_name=common1-w-5} value=17.817173 ], [ var='C' labels={device=sdb, instance_name=common1-w-5} value=0 ], [ var='D' labels={device=sdb, instance_name=common1-w-5} value=17.8 ]} {Instance:device=sdb, instance_name=common1-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common1-w-6 Value:0xc082204b70} C:{Var:C Labels:device=sdb, instance_name=common1-w-6 Value:0xc082204ba8} D:{Var:D Labels:device=sdb, instance_name=common1-w-6 Value:0xc082204b30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931919054s EvaluationString:[ var='B' labels={device=sdb, instance_name=common1-w-6} value=19.195686 ], [ var='C' labels={device=sdb, instance_name=common1-w-6} value=0 ], [ var='D' labels={device=sdb, instance_name=common1-w-6} value=19.1 ]} 
{Instance:device=sdb, instance_name=common2-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-0 Value:0xc082204c30} C:{Var:C Labels:device=sdb, instance_name=common2-w-0 Value:0xc082204c60} D:{Var:D Labels:device=sdb, instance_name=common2-w-0 Value:0xc082204ca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931928128s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-0} value=17.913275 ], [ var='C' labels={device=sdb, instance_name=common2-w-0} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-0} value=17.9 ]} {Instance:device=sdb, instance_name=common2-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-1 Value:0xc082204da0} C:{Var:C Labels:device=sdb, instance_name=common2-w-1 Value:0xc082204d30} D:{Var:D Labels:device=sdb, instance_name=common2-w-1 Value:0xc082204d60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931936591s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-1} value=0.417843 ], [ var='C' labels={device=sdb, instance_name=common2-w-1} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-1} value=0.4 ]} {Instance:device=sdb, instance_name=common2-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-2 Value:0xc082204e08} C:{Var:C Labels:device=sdb, instance_name=common2-w-2 Value:0xc082204e50} D:{Var:D Labels:device=sdb, instance_name=common2-w-2 Value:0xc082204e88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931944832s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-2} value=18.169764 ], [ var='C' labels={device=sdb, instance_name=common2-w-2} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-2} value=18.1 ]} {Instance:device=sdb, instance_name=common2-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-3 Value:0xc082204f30} C:{Var:C Labels:device=sdb, instance_name=common2-w-3 Value:0xc082204f60} D:{Var:D Labels:device=sdb, instance_name=common2-w-3 Value:0xc082204f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931952779s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-3} value=17.832111 ], [ var='C' labels={device=sdb, instance_name=common2-w-3} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-3} value=17.8 ]} {Instance:device=sdb, instance_name=common2-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-4 Value:0xc082205008} C:{Var:C Labels:device=sdb, instance_name=common2-w-4 Value:0xc082205040} D:{Var:D Labels:device=sdb, instance_name=common2-w-4 Value:0xc082204fd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931961522s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-4} value=39.859489 ], [ var='C' labels={device=sdb, instance_name=common2-w-4} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-4} value=39.8 ]} {Instance:device=sdb, instance_name=common2-w-5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-5 Value:0xc0822050d0} C:{Var:C Labels:device=sdb, instance_name=common2-w-5 Value:0xc082205110} D:{Var:D Labels:device=sdb, instance_name=common2-w-5 Value:0xc0822050a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931970008s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-5} value=17.286173 ], [ var='C' labels={device=sdb, instance_name=common2-w-5} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-5} value=17.2 ]} {Instance:device=sdb, instance_name=common2-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-6 Value:0xc0822051a0} C:{Var:C Labels:device=sdb, instance_name=common2-w-6 Value:0xc0822051e0} D:{Var:D Labels:device=sdb, instance_name=common2-w-6 Value:0xc082205170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931980909s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-6} value=17.859844 ], [ var='C' labels={device=sdb, instance_name=common2-w-6} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-6} value=17.8 ]} {Instance:device=sdb, instance_name=common2-w-7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-7 Value:0xc0822052b0} C:{Var:C Labels:device=sdb, instance_name=common2-w-7 Value:0xc082205240} D:{Var:D Labels:device=sdb, instance_name=common2-w-7 Value:0xc082205280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931989995s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-7} value=17.602999 ], [ var='C' labels={device=sdb, instance_name=common2-w-7} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-7} value=17.6 ]} {Instance:device=sdb, instance_name=common2-w-8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=common2-w-8 Value:0xc082205350} C:{Var:C Labels:device=sdb, instance_name=common2-w-8 Value:0xc082205380} D:{Var:D Labels:device=sdb, instance_name=common2-w-8 Value:0xc082205320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.931997644s EvaluationString:[ var='B' labels={device=sdb, instance_name=common2-w-8} value=0.531943 ], [ var='C' labels={device=sdb, instance_name=common2-w-8} value=0 ], [ var='D' labels={device=sdb, instance_name=common2-w-8} value=0.5 ]} {Instance:device=sdb, instance_name=farmstritz1-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-0 Value:0xc082205438} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-0 Value:0xc082205470} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-0 Value:0xc0822053f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932005647s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-0} value=28.100609 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-0} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-0} value=28.1 ]} {Instance:device=sdb, instance_name=farmstritz1-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-1 Value:0xc0822054d0} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-1 Value:0xc082205508} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-1 Value:0xc082205540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932015781s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-1} value=11.197509 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-1} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-1} value=11.1 ]} {Instance:device=sdb, instance_name=farmstritz1-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-2 Value:0xc082205600} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-2 Value:0xc0822055a0} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-2 Value:0xc0822055d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932026063s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-2} value=27.817703 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-2} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-2} value=27.8 ]} {Instance:device=sdb, instance_name=farmstritz1-w-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-3 Value:0xc082205660} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-3 Value:0xc082205690} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-3 Value:0xc0822056c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93203421s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-3} value=28.009066 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-3} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-3} value=28 ]} {Instance:device=sdb, instance_name=farmstritz1-w-4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-4 Value:0xc082205740} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-4 Value:0xc082205770} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-4 Value:0xc0822057a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932043032s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-4} value=0.196373 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-4} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-4} value=0.1 ]} {Instance:device=sdb, instance_name=farmstritz1-w-5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-5 Value:0xc082205890} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-5 Value:0xc082205820} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-5 Value:0xc082205850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932052785s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-5} value=11.316422 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-5} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-5} value=11.3 ]} {Instance:device=sdb, instance_name=farmstritz1-w-6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-6 Value:0xc082205950} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-6 Value:0xc0822058f0} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-6 Value:0xc082205920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932061588s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-6} value=11.049838 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-6} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-6} value=11 ]} {Instance:device=sdb, instance_name=farmstritz1-w-7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-7 Value:0xc082205a30} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-7 Value:0xc0822059c0} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-7 Value:0xc0822059f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932070573s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-7} value=27.842087 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-7} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-7} value=27.8 ]} {Instance:device=sdb, instance_name=farmstritz1-w-8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=farmstritz1-w-8 Value:0xc082205a90} C:{Var:C Labels:device=sdb, instance_name=farmstritz1-w-8 Value:0xc082205ac0} D:{Var:D Labels:device=sdb, instance_name=farmstritz1-w-8 Value:0xc082205b00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932079159s EvaluationString:[ var='B' labels={device=sdb, instance_name=farmstritz1-w-8} value=11.10883 ], [ var='C' labels={device=sdb, instance_name=farmstritz1-w-8} value=0 ], [ var='D' labels={device=sdb, instance_name=farmstritz1-w-8} value=11.1 ]} {Instance:device=sdb, instance_name=qa-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=qa-w-0 Value:0xc082205b50} C:{Var:C Labels:device=sdb, instance_name=qa-w-0 Value:0xc082205b80} D:{Var:D Labels:device=sdb, instance_name=qa-w-0 Value:0xc082205ba8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932087218s EvaluationString:[ var='B' labels={device=sdb, instance_name=qa-w-0} value=8.841171 ], [ var='C' labels={device=sdb, instance_name=qa-w-0} value=0 ], [ var='D' labels={device=sdb, instance_name=qa-w-0} value=8.8 ]} {Instance:device=sdb, instance_name=qa-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=qa-w-1 Value:0xc082205bf8} C:{Var:C Labels:device=sdb, instance_name=qa-w-1 Value:0xc082205c30} D:{Var:D Labels:device=sdb, instance_name=qa-w-1 Value:0xc082205c58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932095055s EvaluationString:[ var='B' labels={device=sdb, instance_name=qa-w-1} value=7.853065 ], [ var='C' labels={device=sdb, instance_name=qa-w-1} value=0 ], [ var='D' labels={device=sdb, instance_name=qa-w-1} value=7.8 ]} {Instance:device=sdb, instance_name=stritzdev-w-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=stritzdev-w-0 Value:0xc082205d30} C:{Var:C Labels:device=sdb, instance_name=stritzdev-w-0 Value:0xc082205cd0} D:{Var:D Labels:device=sdb, instance_name=stritzdev-w-0 Value:0xc082205d00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932103421s EvaluationString:[ var='B' labels={device=sdb, instance_name=stritzdev-w-0} value=4.323204 ], [ var='C' labels={device=sdb, instance_name=stritzdev-w-0} value=0 ], [ var='D' labels={device=sdb, instance_name=stritzdev-w-0} value=4.3 ]} {Instance:device=sdb, instance_name=stritzdev-w-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb, instance_name=stritzdev-w-1 Value:0xc082205dd0} C:{Var:C Labels:device=sdb, instance_name=stritzdev-w-1 Value:0xc082205e10} D:{Var:D Labels:device=sdb, instance_name=stritzdev-w-1 Value:0xc082205da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93211216s EvaluationString:[ var='B' labels={device=sdb, instance_name=stritzdev-w-1} value=4.425639 ], [ var='C' labels={device=sdb, instance_name=stritzdev-w-1} value=0 ], [ var='D' labels={device=sdb, instance_name=stritzdev-w-1} value=4.4 ]} {Instance:device=sdb1, instance_name=common1-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb1, instance_name=common1-w-2 Value:0xc082205e70} C:{Var:C Labels:device=sdb1, instance_name=common1-w-2 Value:0xc082205eb0} D:{Var:D Labels:device=sdb1, instance_name=common1-w-2 Value:0xc082205ee0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932135543s EvaluationString:[ var='B' labels={device=sdb1, instance_name=common1-w-2} value=10.96114 ], [ var='C' labels={device=sdb1, instance_name=common1-w-2} value=0 ], [ var='D' labels={device=sdb1, instance_name=common1-w-2} value=10.9 ]} {Instance:device=sdb15, instance_name=common1-w-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=sdb15, instance_name=common1-w-2 Value:0xc082205f58} C:{Var:C Labels:device=sdb15, instance_name=common1-w-2 Value:0xc082205f90} D:{Var:D Labels:device=sdb15, instance_name=common1-w-2 Value:0xc082205fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.932143881s EvaluationString:[ var='B' labels={device=sdb15, instance_name=common1-w-2} value=8.602321 ], [ var='C' labels={device=sdb15, instance_name=common1-w-2} value=0 ], [ var='D' labels={device=sdb15, instance_name=common1-w-2} value=8.6 ]}]" duration=1.449945215s
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.88.97.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s490, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.945354416Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-idle-tracker-notifier, collection=idleNotifications, command=find" t=2024-05-29T13:44:14.945280814Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.945105126Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.94507416Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.375963ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.88.97.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s490, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.945134845Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.88.97.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s489, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.944944788Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.88.97.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s489, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.944811812Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=45.88.97.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s489, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.944804417Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.216.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s495, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.944637078Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.216.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s495, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.944628689Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.944606869Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.944547165Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.944442556Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.944274929Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.944226852Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.944007767Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-activity-log, collection=activityLogs, command=insert" t=2024-05-29T13:44:14.943912559Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-activity-log, collection=activityLogs, command=insert" t=2024-05-29T13:44:14.94383713Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-activity-log, collection=activityLogs, command=find" t=2024-05-29T13:44:14.943714458Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="app=fairtiq-activity-log, collection=activityLogs, command=find" t=2024-05-29T13:44:14.943686281Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.943735168Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.943695114Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.216.3, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s493, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.943642485Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.216.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s492, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.94346046Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.943294679Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.943131229Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.943126442Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.942972103Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.942641508Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-creditsafe-db, env=dev" t=2024-05-29T13:44:14.942681532Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.213.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s487, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.942616398Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.213.5, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s487, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.942599329Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-creditsafe-db, env=dev" t=2024-05-29T13:44:14.942537422Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.942566083Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.942388624Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=523906 slug=cyberark t=2024-05-29T13:44:14.94225919Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=115.235857ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.213.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s486, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.942247196Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.213.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=frankfurt-s486, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.94223131Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Frankfurt, cluster=Frankfurt, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=216.24.213.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=frankfurt-s486, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.942036534Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.942001963Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.94197953Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.940064986Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.941952705Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=206439 slug=relaypro t=2024-05-29T13:44:14.941844416Z level=debug msg="Saving alert states" count=7 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=stage, environment=qa, role=ibot_fdb" t=2024-05-29T13:44:14.941831332Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.941720538Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=qa, role=ibotdr_fdb" t=2024-05-29T13:44:14.941720305Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=qa, role=ibotdr_fdb" t=2024-05-29T13:44:14.941706475Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.941564672Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.941594235Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.94146307Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=2b92b22bf115209b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.941475562Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.941127519s EvaluationString:}]" duration=30.20462ms
+ level=debug ts=2024-05-29T13:44:14.941504634Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.941481378Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=qa, role=ibot_fdb" t=2024-05-29T13:44:14.941508909Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.941403515Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=pro, role=ibotdr_fdb" t=2024-05-29T13:44:14.941400055Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=460952 slug=prdnextgen instance= t=2024-05-29T13:44:14.941398636Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.941257162Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Düsseldorf, cluster=Düsseldorf, country=Germany, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=213.202.233.112, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dusseldorf-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.941366649Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.941222168Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Düsseldorf, cluster=Düsseldorf, country=Germany, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=213.202.233.112, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dusseldorf-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.941349597Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=460952 slug=prdnextgen t=2024-05-29T13:44:14.941236384Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.941254395Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=pro, role=ibot_fdb" t=2024-05-29T13:44:14.941280199Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=pro, role=fabric_fdb" t=2024-05-29T13:44:14.941196616Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=beta, environment=qa, role=ibot_fdb" t=2024-05-29T13:44:14.941073696Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:14.94102775Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The log input rate (# of bytes input into memory for each transaction log, lives in memory for at least 5 seconds) has been larger than 80MB/s for 20 out of the last 60 minutes on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment. This can indicate that we are using a sizable fraction of our logs' capacity.': error parsing template __alert_FDB - Log Input Rate: template: __alert_FDB - Log Input Rate:1: function \"role\" not defined"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Düsseldorf, cluster=Düsseldorf, country=Germany, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=213.202.233.112, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dusseldorf-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.941075451Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.940891158Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Düsseldorf, cluster=Düsseldorf, country=Germany, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=213.202.233.112, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dusseldorf-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.941059196Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:14.940941372Z level=debug msg="State manager processing evaluation results" resultCount=7
+ logger=ngalert.scheduler user=206439 slug=relaypro version=22 fingerprint=325a159106fc1d1b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.94076941Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=beta, environment=qa, role=ibot_fdb State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:cluster=beta, environment=qa, role=ibot_fdb Value:0xc0337ac7e8} D:{Var:D Labels:cluster=beta, environment=qa, role=ibot_fdb Value:0xc0337ac820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.940248791s EvaluationString:[ var='C' labels={cluster=beta, environment=qa, role=ibot_fdb} value=0 ], [ var='D' labels={cluster=beta, environment=qa, role=ibot_fdb} value=0 ]} {Instance:cluster=mob, environment=pro, role=fabric_fdb State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:cluster=mob, environment=pro, role=fabric_fdb Value:0xc0337ac890} D:{Var:D Labels:cluster=mob, environment=pro, role=fabric_fdb Value:0xc0337ac8b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.940264861s EvaluationString:[ var='C' labels={cluster=mob, environment=pro, role=fabric_fdb} value=0 ], [ var='D' labels={cluster=mob, environment=pro, role=fabric_fdb} value=0 ]} {Instance:cluster=mob, environment=pro, role=ibot_fdb State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:cluster=mob, environment=pro, role=ibot_fdb Value:0xc0337ac930} D:{Var:D Labels:cluster=mob, environment=pro, role=ibot_fdb Value:0xc0337ac960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.940271965s EvaluationString:[ var='C' labels={cluster=mob, environment=pro, role=ibot_fdb} value=0 ], [ var='D' labels={cluster=mob, environment=pro, role=ibot_fdb} value=0 ]} {Instance:cluster=mob, environment=pro, role=ibotdr_fdb State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:cluster=mob, environment=pro, role=ibotdr_fdb Value:0xc0337aca00} D:{Var:D Labels:cluster=mob, environment=pro, role=ibotdr_fdb Value:0xc0337aca50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.940278101s EvaluationString:[ var='C' labels={cluster=mob, environment=pro, role=ibotdr_fdb} value=0 ], [ var='D' labels={cluster=mob, environment=pro, role=ibotdr_fdb} value=0 ]} {Instance:cluster=mob, environment=qa, role=ibot_fdb State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:cluster=mob, environment=qa, role=ibot_fdb Value:0xc0337acae0} D:{Var:D Labels:cluster=mob, environment=qa, role=ibot_fdb Value:0xc0337acab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.940284821s EvaluationString:[ var='C' labels={cluster=mob, environment=qa, role=ibot_fdb} value=0 ], [ var='D' labels={cluster=mob, environment=qa, role=ibot_fdb} value=0 ]} {Instance:cluster=mob, environment=qa, role=ibotdr_fdb State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:cluster=mob, environment=qa, role=ibotdr_fdb Value:0xc0337acb50} D:{Var:D Labels:cluster=mob, environment=qa, role=ibotdr_fdb Value:0xc0337acb80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.940289474s EvaluationString:[ var='C' labels={cluster=mob, environment=qa, role=ibotdr_fdb} value=0 ], [ var='D' labels={cluster=mob, environment=qa, role=ibotdr_fdb} value=0 ]} {Instance:cluster=stage, environment=qa, role=ibot_fdb State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:cluster=stage, environment=qa, role=ibot_fdb Value:0xc0337acbf0} D:{Var:D Labels:cluster=stage, environment=qa, role=ibot_fdb Value:0xc0337acc20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.940292734s EvaluationString:[ var='C' labels={cluster=stage, environment=qa, role=ibot_fdb} value=0 ], [ var='D' labels={cluster=stage, environment=qa, role=ibot_fdb} value=0 ]}]" duration=19.125979ms
+ level=debug ts=2024-05-29T13:44:14.940840454Z caller=remote_instance_store.go:51 user=451750 slug=amadeuspfpprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.940762762Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.940752374Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dusseldorf, cluster=Düsseldorf, country=Germany, datacenter=OneProvider, environment=production, instance=10.0.0.203:9998, ip=213.202.225.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dusseldorf-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.940629163Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=260796 slug=expressvpn t=2024-05-29T13:44:14.94065659Z level=debug msg="Saving alert states" count=10 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - New York" t=2024-05-29T13:44:14.94064512Z level=debug msg="Keeping state" state=Normal
+ 2024/05/29 13:44:14 ERROR: [transport] Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII "too_many_pings".
+ level=debug ts=2024-05-29T13:44:14.940266498Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.940255605Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.940249004Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.940210096Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.940078589Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=84.247.48.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dublin-s451, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.940066875Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.940013156Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.940043743Z caller=remote_alert_sender.go:94 user=250150 slug=bizagi host=bizagi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.117.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fecc92bb-bccc-48b4-8df6-c13462c741d5 alerts=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.940021377Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.939934847Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - Dallas" t=2024-05-29T13:44:14.939897446Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.939868729Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.939847814Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.939711765Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.570847ms
+ level=debug ts=2024-05-29T13:44:14.939684035Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-backend-db, env=dev" t=2024-05-29T13:44:14.939664071Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.939653266Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - Atlanta" t=2024-05-29T13:44:14.939501443Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.939460566Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=84.247.48.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dublin-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.939474971Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=UK - London" t=2024-05-29T13:44:14.939289267Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=UK - London" t=2024-05-29T13:44:14.939275004Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=84.247.48.18, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dublin-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.939275446Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.939260687Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.939271567Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.93918887Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.939230757Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=150145 slug=pleasant instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.939142848Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=150145 slug=pleasant instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.93913482Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=37.120.235.162, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dublin-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.939085105Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=37.120.235.162, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dublin-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.939072421Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=84360 slug=sib instance= t=2024-05-29T13:44:14.938970792Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.938914225Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.938971445Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:14.938904744Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:14.938892714Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:14.938886923Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=84360 slug=sib instance= t=2024-05-29T13:44:14.938940185Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.938852099Z level=debug msg="State manager processing evaluation results" resultCount=10
+ level=debug ts=2024-05-29T13:44:14.938874513Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.938793168Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.938792859Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.938747073Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.938724735Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=114492 slug=railsbank version=2 fingerprint=9f59cc44d6a3b00b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.938606926Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.938210523s EvaluationString:}]" duration=462.833552ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.34.243.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dublin-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.938671002Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.34.243.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dublin-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.938505804Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin-2, country=Ireland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.34.243.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dublin-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.938469079Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.920945681Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ Error parsing panelUID for alert annotationruleID241dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:14.92543313Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=39.18267ms
+ level=debug ts=2024-05-29T13:44:14.938174082Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin, country=Ireland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.34.242.223, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dublin-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.938314993Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dublin, cluster=Dublin, country=Ireland, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.34.242.223, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dublin-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.938091775Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.938008677Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.937961835Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.937965987Z caller=remote_instance_store.go:51 user=27014 slug=baseline msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.93774538Z caller=remote_instance_store.go:51 user=412525 slug=motoblouz msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.937628736Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dubai, cluster=Dubai, country=United Arab Emirates, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=217.138.193.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dubai-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.937673367Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dubai, cluster=Dubai, country=United Arab Emirates, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=217.138.193.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dubai-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.937651881Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.937582579Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.937512835Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.937516623Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.937490598Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Douglas, cluster=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.124.146, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=douglas-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.937448541Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.937403641Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:14.937364914Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.937289326Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.937217258Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.93705035Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.937150899Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Douglas, cluster=Douglas, country=Isle of Man, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=91.90.124.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=douglas-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.93713097Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=27014 slug=baseline t=2024-05-29T13:44:14.937064238Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.937082846Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.936979838Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.936848029Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Doha, cluster=Doha, country=Qatar, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.234.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=doha-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.936702865Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Doha, cluster=Doha, country=Qatar, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.234.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=doha-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.936523356Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.936496912Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-risk-defense-platform-db, env=eu" t=2024-05-29T13:44:14.936453496Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.936483745Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.936472956Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.93629057Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dhaka, cluster=Dhaka-2, country=Bangladesh, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=64.64.112.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dhaka-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.936365644Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.936301112Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager.persist user=80822 slug=corescientific t=2024-05-29T13:44:14.936238466Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.936270793Z caller=remote_instance_store.go:51 user=80822 slug=corescientific msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=80822 slug=corescientific instance="datasource_uid=000000005, ref_id=A,B" t=2024-05-29T13:44:14.936219207Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=80822 slug=corescientific instance="datasource_uid=000000005, ref_id=A,B" t=2024-05-29T13:44:14.936206067Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.936159898Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.936015876Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=80822 slug=corescientific version=1 fingerprint=977a4d593575f8a0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.936092362Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000005, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.935791373s EvaluationString:}]" duration=175.385358ms
+ level=debug ts=2024-05-29T13:44:14.936082103Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dhaka, cluster=Dhaka-2, country=Bangladesh, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=64.64.112.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dhaka-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.936096638Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.936041289Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.936024203Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-periodic-reviews-db, env=eu" t=2024-05-29T13:44:14.935801868Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dallas, cluster=Dallas, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=212.102.41.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=dallas-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.935552956Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.935506761Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.93555776Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Dallas, cluster=Dallas, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=212.102.41.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=dallas-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.935361442Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.935154043Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.935277045Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.935203764Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.614208ms
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-instant-id-qa-db, env=eu" t=2024-05-29T13:44:14.935283519Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Copenhagen, cluster=Copenhagen, country=Denmark, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=188.126.94.226, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=copenhagen-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.935163401Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.935057559Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.935043835Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.934950604Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.934924751Z caller=remote_instance_store.go:51 user=618621 slug=sendamatic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Copenhagen, cluster=Copenhagen, country=Denmark, datacenter=Glesys, environment=production, instance=10.0.0.203:9998, ip=188.126.94.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=copenhagen-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.934569073Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.93451854Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.934474718Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.934294218Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.934332463Z caller=remote_instance_store.go:51 user=20177 slug=paddledash msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=318220 slug=deepalert t=2024-05-29T13:44:14.845158544Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.752378ms
+ logger=ngalert.state.manager user=691855 slug=chainlake instance="__name__=consul_health_service_status, check=_nomad-check-87bb0e0e6fbe34b9278c316c9d3d167651e33904, instance=germany-nomad-server-0, node=compute-hel-1-cpx51-compute-hel-1, service_id=_nomad-task-ea25cafe-7149-6a4b-26f6-7a6c52047b1e-group-dagster-server-dagster-dind-market-intelligence-pre-prod-2-, service_name=dagster-dind-market-intelligence-pre-prod-2, status=warning" t=2024-05-29T13:44:14.934245019Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-idverse-enterprise-db, env=eu" t=2024-05-29T13:44:14.9341829Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Colombo, cluster=Colombo-2, country=Sri Lanka, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=95.181.239.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=colombo-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.934124491Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.93411668Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.934063939Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=1x3mYGa7z, ref_id=A" t=2024-05-29T13:44:14.934010732Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=1x3mYGa7z, ref_id=A" t=2024-05-29T13:44:14.933994398Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Colombo, cluster=Colombo, country=Sri Lanka, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.17.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=colombo-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.933693825Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.933599942Z level=debug msg="State manager processing evaluation results" resultCount=1
+
logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=bc47c892ae3021f7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.933520892Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.93320902s EvaluationString:}]" duration=472.721965ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Chisinau, cluster=Chisinau, country=Moldova, datacenter=Trabia Network, environment=production, instance=10.0.0.203:9998, ip=178.175.142.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=chisinau-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.933483431Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.933455373Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-frontend-log-db, env=eu" t=2024-05-29T13:44:14.933391853Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Chisinau, cluster=Chisinau, country=Moldova, datacenter=Trabia Network, environment=production, instance=10.0.0.203:9998, ip=178.175.142.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=chisinau-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.933262399Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.933228668Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.933019908Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.932946328Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.932917276Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=1x3mYGa7z, ref_id=A" t=2024-05-29T13:44:14.932837384Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Chicago, cluster=Chicago, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.182.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=chicago-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.932831909Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Chicago, cluster=Chicago, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.182.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=chicago-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.932820869Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.932638083Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.932486592Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:14.932386655Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.932465435Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.932438641Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:14.847827756Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.605477ms + logger=ngalert.scheduler user=543654 slug=jobcloudprogrammaticprod version=1 fingerprint=616966175eeb55ed attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.932027935Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.850228539s EvaluationString:}]" duration=99.889147ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.932239616Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.932191282Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.932035008Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.932002874Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Cairo, cluster=Cairo, country=Egypt, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=188.214.122.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=cairo-s451, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.931592975Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.93133772Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.931248628Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.931018658Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.930850704Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.930706126Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.930627985Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Buenos Aires, cluster=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.39.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=buenosaires-s403, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.930467824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.930411332Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=637816 slug=kingobservatory t=2024-05-29T13:44:14.930392736Z level=debug msg="Saving alert states" count=9 max_state_save_concurrency=1 + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="namespace=upsi-live, pod=upsi-live-696f469547-gnnzb" t=2024-05-29T13:44:14.9303597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Buenos Aires, cluster=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.39.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=buenosaires-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.93024867Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.930013857Z caller=grafana.go:247 user=60199 slug=wallapop msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=164 alerts=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Buenos Aires, cluster=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.39.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=buenosaires-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.930007175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="namespace=aggrigatoload-anodot-live, pod=aggrigatoload-anodot-live-7797bcd4cd-6hp5l" t=2024-05-29T13:44:14.929773142Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Buenos Aires, cluster=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.38.62, 
is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=buenosaires-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.929779286Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Buenos Aires, cluster=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.38.62, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=buenosaires-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.92976094Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.929704702Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.929649997Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.929618955Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="namespace=aggrigato-live, pod=aggrigato-live-6df544948b-k8gpq" t=2024-05-29T13:44:14.929571531Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Buenos Aires, cluster=Buenos Aires, country=Argentina, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.38.62, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=buenosaires-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.929591338Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Budapest, cluster=Budapest, country=Hungary, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=86.106.74.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=budapest-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.929234439Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.929184099Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.929011214Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=314067 slug=itsme instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.929024411Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Budapest, cluster=Budapest, country=Hungary, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=185.189.114.114, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=budapest-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.929034886Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=314067 slug=itsme instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.928977104Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=314067 slug=itsme instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.92895942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=314067 slug=itsme instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.928911401Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bucharest, cluster=Bucharest-3, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s480, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.928802353Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=314067 slug=itsme version=4 fingerprint=6696f9c0e7dc218c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.928667831Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.928359309s EvaluationString:}]" duration=10.16073ms + level=debug ts=2024-05-29T13:44:14.928689539Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bucharest, cluster=Bucharest-3, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s480, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.928671755Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.928599229Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.928414472Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bucharest, cluster=Bucharest-3, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.20, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s481, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.928288451Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bucharest, cluster=Bucharest-3, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.49.20, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bucharest-s481, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.928280609Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.928251268Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.928183739Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.928113597Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.928048439Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.927976217Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.927776046Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.92768656Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=3902250becbc59ed attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.927562866Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.927316958s EvaluationString:}]" duration=179.918196ms + level=debug ts=2024-05-29T13:44:14.92759135Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bucharest, cluster=Bucharest-3, country=Romania, datacenter=Binbox, environment=production, instance=10.0.0.203:9998, ip=84.239.14.177, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bucharest-s492, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.927552071Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.927493062Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.927522956Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.927245322Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.733386ms + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.926731039Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=28.249503ms + level=debug ts=2024-05-29T13:44:14.926738069Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:14.926545206Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.926541015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.9, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s417, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.926522251Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.926456535Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.926352885Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.926296483Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=538037 slug=drivewealth instance= t=2024-05-29T13:44:14.926283782Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:14.926216851Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.926218683Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.8, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.926059277Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.926025046Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=245291 slug=pismo version=29 fingerprint=a6d3df1c44e4ba6f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.925897128Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.925671774s EvaluationString:}]" duration=265.681928ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.8, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=brussels-s416, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.925875956Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.925830393Z caller=remote_instance_store.go:51 user=538355 slug=flogic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.925826013Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=538355 slug=flogic instance="account_id=641264638977, dimension_DBInstanceIdentifier=daiwapr-monitor-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:daiwapr-monitor-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" t=2024-05-29T13:44:14.925770128Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538355 slug=flogic t=2024-05-29T13:44:14.925732172Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.6, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s414, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.925673886Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.925644436Z caller=remote_alert_sender.go:94 user=542900 slug=yuktarthtrehan host=yuktarthtrehan-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.184.24.35:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b24853c4-dd6e-4519-a20d-9c57a0f0afd6 alerts=1 + logger=ngalert.state.manager.persist user=770248 slug=aurora t=2024-05-29T13:44:14.925523045Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.208357ms + level=debug ts=2024-05-29T13:44:14.925495735Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.17, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=brussels-s424, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.925124653Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.925024008Z caller=grafana.go:247 user=60199 slug=wallapop msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=176 alerts=0 + level=debug ts=2024-05-29T13:44:14.925014621Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.16, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.924867279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, 
country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.16, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.92485537Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.924826452Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.16, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=brussels-s423, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.924653838Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.924620798Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.924428558Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s422, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.924415408Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.924238076Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923994061Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923920762Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.14, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=brussels-s421, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.92387155Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:14.923745627Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923840109Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923785721Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923768511Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923721045Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923681009Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923710261Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.923545143Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923649468Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.923551337Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923554096Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.13, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=brussels-s420, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.923521266Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923522319Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923491015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923453334Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923429934Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.923422213Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.923376943Z caller=remote_instance_store.go:51 user=452115 
slug=ybmetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923408691Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923395151Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923329244Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.923308641Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm" t=2024-05-29T13:44:14.92325555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923235982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923205635Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.923095668Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923140284Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm" t=2024-05-29T13:44:14.923130951Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.11, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s418, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.923026533Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm" t=2024-05-29T13:44:14.92301285Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.922897543Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.922819302Z caller=grafana.go:247 user=391538 slug=risknarrative msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=noData&state=error" groups=7 alerts=0 + logger=ngalert.state.manager user=343338 slug=f5sdc 
instance="datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922927358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922855872Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.922831341Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.741596ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.922821423Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922796064Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=23997 slug=wheniwork t=2024-05-29T13:44:14.922621999Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.1338ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.10, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brussels-s413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.922643903Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.922582333Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922481202Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brussels, cluster=Brussels, country=Belgium, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.218.10, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=brussels-s413, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.922481283Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922429975Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.922409281Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922339855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, 
group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922307768Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.922355729Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922290059Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Brisbane, cluster=Brisbane, country=Australia, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=223.252.16.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=brisbane-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.9222861Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922207874Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922196219Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922168765Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm" t=2024-05-29T13:44:14.922086339Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc t=2024-05-29T13:44:14.922007355Z level=debug msg="State manager processing evaluation results" resultCount=51
+ logger=ngalert.state.manager.persist user=707607 slug=obi t=2024-05-29T13:44:14.922058044Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.658163ms
+ logger=ngalert.scheduler user=343338 slug=f5sdc version=107 fingerprint=5b1ad323f2ea8c93 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.921326805Z level=debug msg="Alert rule evaluated" results="[{Instance:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm Value:0xc0440aaf10} B:{Var:B Labels:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm Value:0xc0440aaf60} C:{Var:C Labels:datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm Value:0xc0440aaeb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919092948s EvaluationString:[ var='A' labels={datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm} value=23 ], [ var='B' labels={datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm} value=23 ], [ var='C' labels={datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm Value:0xc0440ab0a8} B:{Var:B Labels:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm Value:0xc0440ab008} C:{Var:C Labels:datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm Value:0xc0440ab058}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919111484s EvaluationString:[ var='A' labels={datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm} value=24 ], [ var='B' labels={datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm} value=24 ], [ var='C' labels={datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm Value:0xc0440ab1a8} B:{Var:B Labels:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm Value:0xc0440ab1f0} C:{Var:C Labels:datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm Value:0xc0440ab160}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.91912436s EvaluationString:[ var='A' labels={datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm} value=1 ], [ var='B' labels={datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm} value=1 ], [ var='C' labels={datacenter=dal3.dal, group=directory, instance=directory-01.dal3.dal, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm Value:0xc0440ab310} B:{Var:B Labels:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm Value:0xc0440ab288} C:{Var:C Labels:datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm Value:0xc0440ab2c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919136441s EvaluationString:[ var='A' labels={datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm} value=9 ], [ var='B' labels={datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm} value=9 ], [ var='C' labels={datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm Value:0xc0440ab410} B:{Var:B Labels:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm Value:0xc0440ab390} C:{Var:C Labels:datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm Value:0xc0440ab3d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919146115s EvaluationString:[ var='A' labels={datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm} value=30 ], [ var='B' labels={datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm} value=30 ], [ var='C' labels={datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm Value:0xc0440ab4d0} B:{Var:B Labels:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm Value:0xc0440ab510} C:{Var:C Labels:datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm Value:0xc0440ab490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919156602s EvaluationString:[ var='A' labels={datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm} value=22 ], [ var='B' labels={datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm} value=22 ], [ var='C' labels={datacenter=dc12.ash, group=directory, instance=directory-02.dc12.ash, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm Value:0xc0440ab5e0} B:{Var:B Labels:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm Value:0xc0440ab628} C:{Var:C Labels:datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm Value:0xc0440ab598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919167119s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm} value=39 ], [ var='B' labels={datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm} value=39 ], [ var='C' labels={datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm Value:0xc0440ab6f8} B:{Var:B Labels:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm Value:0xc0440ab738} C:{Var:C Labels:datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm Value:0xc0440ab6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919177342s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm} value=41 ], [ var='B' labels={datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm} value=41 ], [ var='C' labels={datacenter=dx1.dxb, group=directory, instance=directory-02.dx1.dxb, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm Value:0xc0440ab7f0} B:{Var:B Labels:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm Value:0xc0440ab830} C:{Var:C Labels:datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm Value:0xc0440ab870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919183249s EvaluationString:[ var='A' labels={datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm} value=44 ], [ var='B' labels={datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm} value=44 ], [ var='C' labels={datacenter=fr4.fra, group=directory, instance=directory-01.fr4.fra, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm Value:0xc0440ab938} B:{Var:B Labels:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm Value:0xc0440ab978} C:{Var:C Labels:datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm Value:0xc0440ab8f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919188738s EvaluationString:[ var='A' labels={datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm} value=48 ], [ var='B' labels={datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm} value=48 ], [ var='C' labels={datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm Value:0xc0440aba98} B:{Var:B Labels:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm Value:0xc0440aba08} C:{Var:C Labels:datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm Value:0xc0440aba50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919194824s EvaluationString:[ var='A' labels={datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='B' labels={datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='C' labels={datacenter=hk2.hkg, group=directory, instance=directory-01.hk2.hkg, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm Value:0xc0440abb50} B:{Var:B Labels:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm Value:0xc0440abb90} C:{Var:C Labels:datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm Value:0xc0440abc00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919200485s EvaluationString:[ var='A' labels={datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='B' labels={datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm} value=25 ], [ var='C' labels={datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm Value:0xc0440abc80} B:{Var:B Labels:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm Value:0xc0440abcc0} C:{Var:C Labels:datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm Value:0xc0440abd08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919207515s EvaluationString:[ var='A' labels={datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='B' labels={datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='C' labels={datacenter=ld6.lon, group=directory, instance=directory-01.ld6.lon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm Value:0xc0440abd90} B:{Var:B Labels:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm Value:0xc0440abde0} C:{Var:C Labels:datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm Value:0xc0440abe20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919213601s EvaluationString:[ var='A' labels={datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='B' labels={datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm} value=15 ], [ var='C' labels={datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm Value:0xc0440abea0} B:{Var:B Labels:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm Value:0xc0440abf10} C:{Var:C Labels:datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm Value:0xc0440abf50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919218468s EvaluationString:[ var='A' labels={datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm} value=58 ], [ var='B' labels={datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm} value=58 ], [ var='C' labels={datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm Value:0xc0440abfd0} B:{Var:B Labels:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm Value:0xc04a036010} C:{Var:C Labels:datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm Value:0xc04a036068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919222912s EvaluationString:[ var='A' labels={datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm} value=46 ], [ var='B' labels={datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm} value=46 ], [ var='C' labels={datacenter=ls1.lis, group=directory, instance=directory-02.ls1.lis, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm Value:0xc04a0360f8} B:{Var:B Labels:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm Value:0xc04a036140} C:{Var:C Labels:datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm Value:0xc04a036180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919228068s EvaluationString:[ var='A' labels={datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm} value=51 ], [ var='B' labels={datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm} value=51 ], [ var='C' labels={datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm Value:0xc04a036280} B:{Var:B Labels:datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm Value:0xc04a0361f8} C:{Var:C Labels:datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm Value:0xc04a036240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919232651s EvaluationString:[ var='A' labels={datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm} value=58 ], [ var='B' labels={datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm} value=58 ], [ var='C' labels={datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm Value:0xc04a036390} B:{Var:B Labels:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm Value:0xc04a036308} C:{Var:C Labels:datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm Value:0xc04a036350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919236972s EvaluationString:[ var='A' labels={datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm} value=60 ], [ var='B' labels={datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm} value=60 ], [ var='C' labels={datacenter=md2.mad, group=directory, instance=directory-01.md2.mad, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm Value:0xc04a036418} B:{Var:B Labels:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm Value:0xc04a036460} C:{Var:C Labels:datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm Value:0xc04a0364a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919243732s EvaluationString:[ var='A' labels={datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm} value=19 ], [ var='B' labels={datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm} value=19 ], [ var='C' labels={datacenter=md2.mad, group=directory, instance=directory-02.md2.mad, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm Value:0xc04a036528} B:{Var:B Labels:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm Value:0xc04a036568} C:{Var:C Labels:datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm Value:0xc04a0365b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919248438s EvaluationString:[ var='A' labels={datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm} value=51 ], [ var='B' labels={datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm} value=51 ], [ var='C' labels={datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm Value:0xc04a036630} B:{Var:B Labels:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm Value:0xc04a036670} C:{Var:C Labels:datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm Value:0xc04a0366b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919254428s EvaluationString:[ var='A' labels={datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm} value=62 ], [ var='B' labels={datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm} value=62 ], [ var='C' labels={datacenter=me1.mel, group=directory, instance=directory-02.me1.mel, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm Value:0xc04a036780} B:{Var:B Labels:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm Value:0xc04a0367c8} C:{Var:C Labels:datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm Value:0xc04a036740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919259139s EvaluationString:[ var='A' labels={datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm} value=51 ], [ var='B' labels={datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm} value=51 ], [ var='C' labels={datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm Value:0xc04a036850} B:{Var:B Labels:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm Value:0xc04a036890} C:{Var:C Labels:datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm Value:0xc04a0368d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.91926422s EvaluationString:[ var='A' labels={datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm} value=20 ], [ var='B' labels={datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm} value=20 ], [ var='C' labels={datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm Value:0xc04a036960} B:{Var:B Labels:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm Value:0xc04a0369a8} C:{Var:C Labels:datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm Value:0xc04a0369f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919268999s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm} value=8 ], [ var='B' labels={datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm} value=8 ], [ var='C' labels={datacenter=ny2.nyc, group=directory, instance=directory-01.ny2.nyc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm Value:0xc04a036aa8} B:{Var:B Labels:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm Value:0xc04a036ae8} C:{Var:C Labels:datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm Value:0xc04a036b30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.9192736s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm} value=14 ], [ var='B' labels={datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm} value=14 ], [ var='C' labels={datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm Value:0xc04a036bd0} B:{Var:B Labels:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm Value:0xc04a036c10} C:{Var:C Labels:datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm Value:0xc04a036c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919279886s EvaluationString:[ var='A' labels={datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm} value=6 ], [ var='B' labels={datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm} value=6 ], [ var='C' labels={datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm Value:0xc04a036cd0} B:{Var:B Labels:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm Value:0xc04a036d10} C:{Var:C Labels:datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm Value:0xc04a036d50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919284886s EvaluationString:[ var='A' labels={datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm} value=58 ], [ var='B' labels={datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm} value=58 ], [ var='C' labels={datacenter=os1.osa, group=directory, instance=directory-02.os1.osa, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm Value:0xc04a036de0} B:{Var:B Labels:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm Value:0xc04a036e20} C:{Var:C Labels:datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm Value:0xc04a036e60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919289481s EvaluationString:[ var='A' labels={datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='B' labels={datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='C' labels={datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm Value:0xc04a036ef0} B:{Var:B Labels:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm Value:0xc04a036f30} C:{Var:C Labels:datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm Value:0xc04a036f70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919294195s EvaluationString:[ var='A' labels={datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='B' labels={datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm} value=34 ], [ var='C' labels={datacenter=pa2.par, group=directory, instance=directory-02.pa2.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm Value:0xc04a036ff8} B:{Var:B Labels:datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm Value:0xc04a037040} C:{Var:C Labels:datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm Value:0xc04a037088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919298892s EvaluationString:[ var='A' labels={datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm} value=50 ], [ var='B' labels={datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm} value=50 ], [ var='C' labels={datacenter=pa2.par, group=directory, instance=directory-main-01.pa2.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm Value:0xc04a037150} B:{Var:B Labels:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm Value:0xc04a037198} C:{Var:C Labels:datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm Value:0xc04a037110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919303699s EvaluationString:[ var='A' labels={datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm} value=29 ], [ var='B' labels={datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm} value=29 ], [ var='C' labels={datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm Value:0xc04a037218} B:{Var:B Labels:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm Value:0xc04a037258} C:{Var:C Labels:datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm Value:0xc04a037298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919318923s EvaluationString:[ var='A' labels={datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm} value=43 ], [ var='B' labels={datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm} value=43 ], [ var='C' labels={datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm Value:0xc04a037368} B:{Var:B Labels:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm Value:0xc04a0373b0} C:{Var:C Labels:datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm Value:0xc04a037328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919323935s EvaluationString:[ var='A' labels={datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm} value=26 ], [ var='B' labels={datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm} value=26 ], [ var='C' labels={datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm Value:0xc04a037440} B:{Var:B Labels:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm Value:0xc04a037480} C:{Var:C Labels:datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm Value:0xc04a0374c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919328805s EvaluationString:[ var='A' labels={datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm} value=46 ], [ var='B' labels={datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm} value=46 ], [ var='C' labels={datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm Value:0xc04a037540} B:{Var:B Labels:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm Value:0xc04a037580} C:{Var:C Labels:datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm Value:0xc04a0375c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919333391s EvaluationString:[ var='A' labels={datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm} value=30 ], [ var='B' labels={datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm} value=30 ], [ var='C' labels={datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm Value:0xc04a037690} B:{Var:B Labels:datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm Value:0xc04a0376d8} C:{Var:C Labels:datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm Value:0xc04a037648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919340711s EvaluationString:[ var='A' labels={datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm} value=13 ], [ var='B' labels={datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm} value=13 ], [ var='C' labels={datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm Value:0xc04a0377e0} B:{Var:B Labels:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm Value:0xc04a037760} C:{Var:C Labels:datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm Value:0xc04a0377a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919345658s EvaluationString:[ var='A' labels={datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm} value=58 ], [ var='B' labels={datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm} value=58 ], [ var='C' labels={datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm Value:0xc04a037860} B:{Var:B Labels:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm Value:0xc04a0378a0} C:{Var:C Labels:datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm Value:0xc04a0378e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919350568s EvaluationString:[ var='A' labels={datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm} value=15 ], [ var='B' labels={datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm} value=15 ], [ var='C' labels={datacenter=sp4.sao, group=directory, instance=directory-02.sp4.sao, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm Value:0xc04a037978} B:{Var:B Labels:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm Value:0xc04a0379c0} C:{Var:C Labels:datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm Value:0xc04a037a00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919355313s EvaluationString:[ var='A' labels={datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm} value=18 ], [ var='B' labels={datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm} value=18 ], [ var='C' labels={datacenter=sto6.sto, group=directory, instance=directory-01.sto6.sto, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm Value:0xc04a037a80} B:{Var:B Labels:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm Value:0xc04a037ac0} C:{Var:C Labels:datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm Value:0xc04a037b00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919361156s EvaluationString:[ var='A' labels={datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm} value=1 ], [ var='B' labels={datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm} value=1 ], [ var='C' labels={datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm Value:0xc04a037c18} B:{Var:B Labels:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm Value:0xc04a037b88} C:{Var:C Labels:datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm Value:0xc04a037bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919365718s EvaluationString:[ var='A' labels={datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm} value=61 ], [ var='B' labels={datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm} value=61 ], [ var='C' labels={datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm Value:0xc04a037ce0} B:{Var:B Labels:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm Value:0xc04a037d30} C:{Var:C Labels:datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm Value:0xc04a037ca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919370774s EvaluationString:[ var='A' labels={datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm} value=28 ], [ var='B' labels={datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm} value=28 ], [ var='C' labels={datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm Value:0xc04a037dc8} B:{Var:B Labels:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm Value:0xc04a037e10} C:{Var:C Labels:datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm Value:0xc04a037e78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919377772s EvaluationString:[ var='A' labels={datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm} value=26 ], [ var='B' labels={datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm} value=26 ], [ var='C' labels={datacenter=sy5.syd, group=directory, instance=directory-01.sy5.syd, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm Value:0xc04a037f40} B:{Var:B Labels:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm Value:0xc04a037f80} C:{Var:C Labels:datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm Value:0xc04a037f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919384797s EvaluationString:[ var='A' labels={datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm} value=49 ], [ var='B' labels={datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm} value=49 ], [ var='C' labels={datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm Value:0xc0084d4000} B:{Var:B Labels:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm Value:0xc0084d4040} C:{Var:C Labels:datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm Value:0xc0084d4088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919395518s EvaluationString:[ var='A' labels={datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm} value=56 ], [ var='B' labels={datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm} value=56 ], [ var='C' labels={datacenter=tr2.tor, group=directory, instance=directory-01.tr2.tor, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm Value:0xc0084d4170} B:{Var:B Labels:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm Value:0xc0084d41b0} C:{Var:C Labels:datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm Value:0xc0084d4118}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919405046s EvaluationString:[ var='A' labels={datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm} value=49 ], [ var='B' labels={datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm} value=49 ], [ var='C' labels={datacenter=tr2.tor, group=directory, instance=directory-02.tr2.tor, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm Value:0xc0084d4250} B:{Var:B Labels:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm Value:0xc0084d42a8} C:{Var:C Labels:datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm Value:0xc0084d42f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919413094s EvaluationString:[ var='A' labels={datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm} value=60 ], [ var='B' labels={datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm} value=60 ], [ var='C' labels={datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm Value:0xc0084d43d8} B:{Var:B Labels:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm Value:0xc0084d4430} C:{Var:C Labels:datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm Value:0xc0084d4390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919422201s EvaluationString:[ var='A' labels={datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm} value=3 ], [ var='B' labels={datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm} value=3 ], [ var='C' labels={datacenter=ty8.tky, group=directory, instance=directory-02.ty8.tky, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm Value:0xc0084d44b0} B:{Var:B Labels:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm Value:0xc0084d44f0} C:{Var:C Labels:datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm Value:0xc0084d4530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919429449s EvaluationString:[ var='A' labels={datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm} value=63 ], [ var='B' labels={datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm} value=63 ], [ var='C' labels={datacenter=wes.sea, group=directory, instance=directory-01.wes.sea, origin=volterra-infra-vm} value=0 ]} {Instance:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm Value:0xc0084d4648} B:{Var:B Labels:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm Value:0xc0084d45b0} C:{Var:C Labels:datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm Value:0xc0084d4600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.919436164s EvaluationString:[ var='A' labels={datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm} value=13 ], [ var='B' labels={datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm} value=13 ], [ var='C' labels={datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm} value=0 ]}]" duration=744.671042ms
+ level=debug ts=2024-05-29T13:44:14.921928789Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.921873786Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bratislava, cluster=Bratislava-2, country=Slovakia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.102.232.92, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bratislava-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.921861841Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.921841768Z caller=remote_instance_store.go:51 user=363785 slug=moonletmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.921828939Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ logger=ngalert.state.manager user=363785 slug=moonletmonitor instance= t=2024-05-29T13:44:14.921778575Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.921738631Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=363785 slug=moonletmonitor instance= t=2024-05-29T13:44:14.921763261Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bratislava, cluster=Bratislava-2, country=Slovakia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.102.232.92, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bratislava-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.921660859Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.921606323Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.921627569Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn"
+ level=debug ts=2024-05-29T13:44:14.921582013Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:14.921519101Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.134184ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bratislava, cluster=Bratislava-2, country=Slovakia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.102.232.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bratislava-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.921460613Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.921466085Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=158536 slug=clearsaleantifraude t=2024-05-29T13:44:14.921401442Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.636749ms
+ level=debug ts=2024-05-29T13:44:14.921335275Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bratislava, cluster=Bratislava-2, country=Slovakia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.102.232.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bratislava-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.921270783Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.921290633Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bratislava, cluster=Bratislava-2, country=Slovakia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=149.102.232.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bratislava-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.921259936Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.921191757Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ADAMSCOUNTYCO, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.921142959Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARCATACATEST, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.921121244Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PRAIRIEVILLAGEKS-TEST, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.921090987Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.921101546Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PRAIRIEVILLAGEKS-TEST, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.921080882Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.92103631Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/VOLUNTOWNCT, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.921063606Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.921015339Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/VOLUNTOWNCT, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.921052785Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.92096278Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bogota, cluster=Bogota, country=Colombia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.16.220, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bogota-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.920939067Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HARRISCOUNTYTX, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.920920075Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRECKENRIDGECO, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.920895478Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DECATURIL-FLAGFIX, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.920879946Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.92082317Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.920799232Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.920773982Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=51.19392ms
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RICHMONDCOUNTYVA-STAGING, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.920773591Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.920735908Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/OWATONNAMN-STAGING, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.920752109Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.920678359Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNCMATUPDATEV4, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.92072085Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.920630901Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CLINTONCT-TEST, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.920688122Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CLINTONCT-TEST, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.920676684Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWSHOREHAMRI-MAT, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.92060818Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWSHOREHAMRI-MAT, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.920598406Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.920507585Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOLINGBROOKILFINAL, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.920548335Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bogota, cluster=Bogota, country=Colombia, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=154.47.16.129, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bogota-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.920552743Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.920474732Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.920469084Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GODDARDKS, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.920425724Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.920424351Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.920406372Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CITYOFSONOMACA, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.920396937Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CLOVERDALECA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.920368017Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.920282937Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.920246595Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BUENAPARKCA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.920269317Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONTCOPA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.920220158Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=432323 slug=lithic instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.918143735Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=83.97.23.178, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.920218498Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONTCOPA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.920208102Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.920116487Z caller=remote_instance_store.go:51 user=83647 slug=bidsolutions msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UPPERDARBYPA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.920153132Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CENTRALFALLSRI-MAT, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.920097923Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.920040145Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BEVERLYMA-POSTLIVEMATFIX, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.920052327Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/KAUAICOUNTYHI, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.920030414Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.919929815Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:14.919974052Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.175182ms
+ level=debug ts=2024-05-29T13:44:14.919943864Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINONACOUNTYMNFINALTEST, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.919885339Z
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.919907308Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.919781916Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=50.159863ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.919877024Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINDSORCT_BACKUP0602, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.919844673Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHAFTERCA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.91982409Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.919788532Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHAFTERCA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.919814171Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.919640052Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.919702335Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.91964871Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.919634913Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.919521723Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRAFTONMAMATUPDATEV3, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.919677537Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRAFTONMAMATUPDATEV3, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.919667571Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.919554834Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MIDDLESEXNJ, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.919584569Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.919519504Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.919532194Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MIDDLESEXNJ, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.919575102Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.919514165Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.919497544Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.919513861Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.919465936Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.919495048Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SCOTTSVALLEYCA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.919479089Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MUNDELEINIL-MATREMAP, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.919451757Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MTPLEASANTWITEST, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.919422561Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CLAREMOREOK-STAGING, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.919391464Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.919310412Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.919305666Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRAFTONMAMATUPDATE, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.91932697Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.919306487Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/KERNERSVILLENCMAT, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.919249739Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.919214768Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.919099921Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn 
t=2024-05-29T13:44:14.919071682Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LISLEILTEST, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.919080679Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SUNPRAIRIEWI-TR, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.919050231Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.918858635Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/subscriptions/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/resourcegroups/Default-SQL-EastUS/providers/Microsoft.Sql/servers/hdnysxcx5f/databases/bouldercountyco, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.918910872Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CENTRALFALLSRIMATTEST, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.918824601Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.918623627Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.918565791Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTPLEASANTTX, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.918557349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ABINGTONPA-MIGRATION, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.918486684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNCMATUPDATEV3, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.918454924Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.918444358Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.918399544Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=ddhkbrfewv7k0d, ref_id=A" t=2024-05-29T13:44:14.918344095Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.918251273Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.918294153Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.918232646Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=542095 slug=intelligencefusion t=2024-05-29T13:44:14.91817318Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1 + logger=ngalert.state.manager user=542095 slug=intelligencefusion instance="AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/news-api-https/2a1119d34d9fd13d" t=2024-05-29T13:44:14.918157371Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=542095 slug=intelligencefusion instance="AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/news-api-https/2a1119d34d9fd13d" t=2024-05-29T13:44:14.918145528Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=542095 slug=intelligencefusion instance="AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/auth-api/6b403266a8d6f76a" t=2024-05-29T13:44:14.918075319Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BUENAPARKCA-IS-RPA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.918089457Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHHILLSVILLAGENY-TEST, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.91802075Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=542095 slug=intelligencefusion instance="AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/actor-api-https/ed5db844777eae94" t=2024-05-29T13:44:14.917915013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=542095 slug=intelligencefusion version=100 fingerprint=aecdc8c11173d346 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.917635554Z level=debug msg="Alert rule evaluated" results="[{Instance:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/actor-api-https/ed5db844777eae94 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/actor-api-https/ed5db844777eae94 Value:0xc01a13dc60} C:{Var:C Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/actor-api-https/ed5db844777eae94 Value:0xc01a13dc68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.916931391s EvaluationString:[ var='B' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/actor-api-https/ed5db844777eae94} value=0.05836321503141789 ], [ var='C' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/actor-api-https/ed5db844777eae94} value=0 ]} {Instance:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/alert-api-https/b47ca31b12837847 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/alert-api-https/b47ca31b12837847 Value:0xc01a13dd30} C:{Var:C Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/alert-api-https/b47ca31b12837847 Value:0xc01a13dd38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.916948882s EvaluationString:[ var='B' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, 
TargetGroup=targetgroup/alert-api-https/b47ca31b12837847} value=0.17497535686274507 ], [ var='C' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/alert-api-https/b47ca31b12837847} value=0 ]} {Instance:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/asset-api-https/13f91165db7c0663 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/asset-api-https/13f91165db7c0663 Value:0xc01a13de00} C:{Var:C Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/asset-api-https/13f91165db7c0663 Value:0xc01a13de08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.916959853s EvaluationString:[ var='B' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/asset-api-https/13f91165db7c0663} value=0.07284629364073826 ], [ var='C' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/asset-api-https/13f91165db7c0663} value=0 ]} {Instance:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/auth-api/6b403266a8d6f76a State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/auth-api/6b403266a8d6f76a Value:0xc01a13dee0} C:{Var:C Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/auth-api/6b403266a8d6f76a Value:0xc01a13dee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.916967897s EvaluationString:[ var='B' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/auth-api/6b403266a8d6f76a} value=0.03610806272560719 ], [ var='C' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/auth-api/6b403266a8d6f76a} value=0 ]} {Instance:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/incident-api-fargate-https/94900f620ff47135 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/incident-api-fargate-https/94900f620ff47135 Value:0xc01a13dfd0} C:{Var:C Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/incident-api-fargate-https/94900f620ff47135 Value:0xc01a13dfd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.916975623s EvaluationString:[ var='B' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/incident-api-fargate-https/94900f620ff47135} value=0.10054996973697056 ], [ var='C' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/incident-api-fargate-https/94900f620ff47135} value=0 ]} {Instance:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/news-api-https/2a1119d34d9fd13d State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/news-api-https/2a1119d34d9fd13d Value:0xc0054900a0} C:{Var:C Labels:AvailabilityZone=eu-west-2a, 
LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/news-api-https/2a1119d34d9fd13d Value:0xc0054900a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.916981378s EvaluationString:[ var='B' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/news-api-https/2a1119d34d9fd13d} value=0.12632268973214286 ], [ var='C' labels={AvailabilityZone=eu-west-2a, LoadBalancer=app/global-alb/8944643a463c0787, TargetGroup=targetgroup/news-api-https/2a1119d34d9fd13d} value=0 ]}]" duration=33.424491ms + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DURANGOCO, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.91782874Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CITYOFSELMACA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.917808859Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEDHAMMA-STAGING, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.917688056Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.917577662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RI-DHS, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.91756006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.917523841Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TUOLUMNECOUNTYCA-MIGRATION, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.917462278Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SUNPRAIRIEWI-FLAGFIX, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.917442617Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BUENAPARKCA-STAGING, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.917413551Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWTONMA-RELOAD, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.917370721Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ENGLEWOODCO, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.917349859Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EUREKACAGISTEST, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.917336182Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURNSVILLEMNGISPHASE2, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.917296784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNCMATUPDATEV2, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.917277247Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DETROITMI-TEST, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.917255219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, 
is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.917107649Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.917094063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOLTONCT-MAT-UPDATE, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.917101464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SUNPRAIRIEWIGISFLAGFIX, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.917037353Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TUOLUMNECOUNTYCA-MIGRATION2, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.916926997Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FRESNOCA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.916905006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FERNLEYNV, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.916840396Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FERNLEYNV, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.91682892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.916779864Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINDSORCT_BACKUP0603, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.916756713Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SUWANEEGA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.916730527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SUWANEEGA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.916718487Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EUREKACA-TEST, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.91665345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ABINGTONMA-STAGING, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.916512172Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.916460829Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOLTONCT-MAT-UPDATE-V2, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.9164286Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/_MONROECOUNTYIN-MATFIX_BAK, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.916380448Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.125, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.916338978Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.125, is_collocated=false, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s402, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.916322251Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GREENRIVERWY, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.91622159Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MN-MDH, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.916158574Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.916030251Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BALTIMOREMD, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.916126082Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.916071845Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.915964349Z caller=remote_instance_store.go:51 user=357638 slug=usepower msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/AMHERSTMAJMO, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.916048368Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berkshire, cluster=Berkshire, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.51.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berkshire-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.915868201Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HUDSONOH, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.915865032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WORCESTERMA-GIS, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.915843945Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.915806434Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.915740727Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARBLEHEADMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.915755294Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARBLEHEADMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.915743785Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berkshire, cluster=Berkshire, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.51.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berkshire-s410, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.915683433Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.915639Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PIERRESD, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.915690828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.915627091Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PIERRESD, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.915680175Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.915562266Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.91559838Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MISSIONHILLSKS, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.915616135Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.915525721Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TALBOTCOMD, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.915562752Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GALVESTONCOUNTYTX, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.915423584Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHSTPAULMN, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.915403091Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RI-CRMC, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.915321932Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RICHMONDRI, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.915277664Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berkshire, cluster=Berkshire, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.63.89, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berkshire-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.915218505Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTHARTFORDCT, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.915194388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berkshire, cluster=Berkshire, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.63.89, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berkshire-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.915203152Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=542900 slug=yuktarthtrehan instance= t=2024-05-29T13:44:14.915085741Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager.persist user=843304 slug=ppcgroup t=2024-05-29T13:44:14.915132414Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY-TEST-MATUPDATE1, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.915128921Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHWINDSORCT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.915106316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHWINDSORCT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.915096792Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berkshire, cluster=Berkshire, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.63.59, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berkshire-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.915034099Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SCITUATERI, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.914970215Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.914855229Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.267737ms + logger=ngalert.state.manager user=430961 slug=solifi instance="Instance=--" t=2024-05-29T13:44:14.914845232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 
slug=solifi instance="Instance=--" t=2024-05-29T13:44:14.914829895Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.914813098Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/POSTFALLSID, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.914842124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAPLEVALLEYWA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.914800408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAPLEVALLEYWA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.91479288Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNSHIPLOWERMAKEFIELDPA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.914746444Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berkshire, cluster=Berkshire, country=United Kingdom, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=138.199.63.59, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berkshire-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.914765826Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=460952 slug=prdnextgen t=2024-05-29T13:44:14.914664577Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TALLMADGEOH, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.914714255Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.914732118Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.914731728Z caller=remote_instance_store.go:51 user=460952 slug=prdnextgen msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=166137 slug=teletracking t=2024-05-29T13:44:14.914599598Z level=debug msg="Deleting alert states" count=1 + level=debug ts=2024-05-29T13:44:14.914696135Z caller=remote_instance_store.go:51 user=166137 slug=teletracking msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=460952 slug=prdnextgen t=2024-05-29T13:44:14.91448859Z level=debug msg="State manager 
processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Belgrade, cluster=Belgrade, country=Serbia, datacenter=Altushost, environment=production, instance=10.0.0.203:9998, ip=37.46.115.41, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=belgrade-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.914588488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SALINANY-MATCHANGE, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.914572238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SALINANY-MATCHANGE, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.914562139Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=truemotion-unenrollment-worker, pod=truemotion-unenrollment-worker-58b8b664d5-mvr26" t=2024-05-29T13:44:14.914570374Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=373502 slug=stakeandrelax t=2024-05-29T13:44:14.914498962Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.178139ms + level=info ts=2024-05-29T13:44:14.914519918Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edbhsq1qbvri8e alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Belgrade, cluster=Belgrade, country=Serbia, datacenter=Altushost, environment=production, instance=10.0.0.203:9998, ip=37.46.115.41, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=belgrade-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.914393553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Belgrade, cluster=Belgrade, country=Serbia, datacenter=Altushost, environment=production, instance=10.0.0.203:9998, ip=37.46.115.41, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=belgrade-s403, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.914376468Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMFDCA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.914415785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SURFCITYNC, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.914376804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166137 slug=teletracking instance="Series=query059300b92153444f82cb1a1a2bfce524" t=2024-05-29T13:44:14.914353995Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEPPERELLMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.914356745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCHESTERNH-STAGING, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.914324883Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=82.102.26.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.914200504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=82.102.26.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.914163465Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.914111358Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.914041712Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WOOSTEROH, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.91408817Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HOPKINTONRI, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.913991568Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=82.102.26.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.913936375Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FORESTPARKOH, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.913944012Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=82.102.26.210, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s406, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.91392048Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.913883278Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SEAGOVILLETX, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.913849994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LANSINGNY, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.913787418Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=82.102.26.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.913682938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:14.913614271Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.298579ms + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINTHROPMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.913665317Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EXETERRI, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.913642822Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-GIS, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.913577125Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-GIS, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.913564491Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=82.102.26.194, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s407, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.913469063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHELTONCT, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.913482822Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.913436527Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BROOKFIELDCT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.913375572Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RUTLANDMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.913356328Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-FINALREMAP, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.913334505Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=37.120.142.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.913295949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=37.120.142.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.913279091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.913238885Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=43.644877ms + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FORTMYERSFL, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.913311982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/COVENTRYRI, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.913296918Z level=debug msg="Keeping 
state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/COVENTRYRI, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.913290267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CLINTONCT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.913252387Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAUKEGANIL, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.913221808Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.913132679Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.783891ms + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEBANONOH, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.913160068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTERNHIGHLANDSHEALTHDISTRICT, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.913129985Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.913070133Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WEBSTERMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.913067666Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TIVERTONRI, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.913034156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TIVERTONRI, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.913027345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.911865487Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=sandbox-v2, Consumer Group=ledger-ne-consumer-v1, Topic=processing.transaction-grouped-card-network-events.v1" 
t=2024-05-29T13:44:14.911813691Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.912866238Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:14.912804943Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BENICIACA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.912879545Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=335419 slug=tbauctions t=2024-05-29T13:44:14.912827107Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARRENCOUNTYNC, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.912861001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.912783702Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MILLBURYMA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.912832395Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEMO-SUPPORT-2, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.91273763Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HEMPSTEADNY-CLEANUP, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.912672188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRISTOLRI, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.912611405Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HUBBARDSTONMA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.912572644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MUNDELEINIL, 
resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.912536005Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.912450322Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRANBYNY, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.912445465Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.912397191Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.912322537Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FOOTHILLSHD, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.912296069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHHILLSVILLAGENY, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.912217059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SUFFIELDCT, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.912162495Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAREMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.912144179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAREMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.912134471Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MCALESTEROK, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.912065858Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.912044865Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PLAINVILLECT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.912035402Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCKYHILLCT, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.911974248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCKYHILLCT, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.911964281Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.911860276Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOARDMANOH, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.911901199Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BELLEAIRFL, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.911830642Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/AGAWAMMAJMOFLAGS, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.911613832Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PAXTONMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.911551279Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.66, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s409, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.9114781Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WILLINGTONCT, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.911530875Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/YARMOUTHMA-MAT, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.911498743Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEDHAMMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.911454966Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BALTIMOREMDDOT, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.911424183Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MANCHESTERMA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.911405247Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.911338958Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.911357061Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/Subscriptions/71e10ac9-a8a2-48aa-8db6-496b91fa7449/resourceGroups/Default-SQL-EastUS/providers/Microsoft.Sql/Servers/hdnysxcx5f/databases/boroughofbathpa, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.911378686Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.911220882Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager.persist user=146728 slug=dgc t=2024-05-29T13:44:14.911171531Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GEORGIADOE, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.911189636Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.911014354Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=146728 slug=dgc t=2024-05-29T13:44:14.911118505Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=196413 slug=form3production t=2024-05-29T13:44:14.910824541Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.578279ms + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SELMACA, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.911146376Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=146728 slug=dgc version=1 fingerprint=2b67f6c1ccddb2ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.911038693Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[C0:{Var:C Labels: Value:} C1:{Var:C Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.910641432s EvaluationString:[ var='C0' metric='NoData' labels={} value=null ], [ var='C1' metric='NoData' labels={} value=null ]}]" duration=91.392129ms + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CENTRALFALLSRI, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.911113963Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.911034152Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.9109903Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/READINGMA-MIGRATION, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.911052102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/READINGMA-MIGRATION, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.911040743Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.91093608Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GROTONMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.91083224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GROTONMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.910822489Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.910731821Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY-TEST-MATUPDATE, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.910716683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.910536612Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINCHESTERMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.910564494Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAWRENCEKS, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.910498136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=DUBAI Query" t=2024-05-29T13:44:14.910490401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn" 
t=2024-05-29T13:44:14.910484955Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAWRENCEKS, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.910488722Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARCATACA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.91045679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WATERTOWNMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.910435117Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WATERTOWNMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.910419818Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.910319837Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=770248 slug=aurora t=2024-05-29T13:44:14.910310389Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=26ab494e7bbc56ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.910204318Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=DUBAI Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0959e0708} Threshold:{Var:Threshold Labels: Value:0xc0959e0740} compare:{Var:compare Labels:aggregatedBy=sum, name=DUBAI Query Value:0xc0959e0790} sum:{Var:sum Labels:aggregatedBy=sum, name=DUBAI Query Value:0xc0959e07e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.909647704s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={aggregatedBy=sum, name=DUBAI Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=DUBAI Query} value=0 ]}]" duration=68.120254ms + logger=ngalert.state.manager user=770248 slug=aurora instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.910295588Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=770248 slug=aurora instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.910279738Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, 
server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.910283241Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=770248 slug=aurora t=2024-05-29T13:44:14.910224996Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FRANKLININ, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.910232464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TISBURYMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.910211255Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/VILLAGEOFBISCAYNEPARKFL, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.910156472Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.91012807Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYINNEWGISTEST, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.910052395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.910086313Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.91007767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-REMAP, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.91003349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.910051553Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-REMAP, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.91002929Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GLOCESTERRI, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.909989803Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTMINSTERMA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.909977105Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTMINSTERMA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.909970277Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.909914916Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEERFIELDIL, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.909862617Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.909838368Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHPROVIDENCERI, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.909817413Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=163215 slug=tripadvisor instance= t=2024-05-29T13:44:14.90977095Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STUARTFL, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.909806311Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163215 slug=tripadvisor t=2024-05-29T13:44:14.909721466Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STOWOH, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.909762629Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARATHONFL, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.909749432Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ANDERSONSC, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.909728326Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NYECOUNTYNV, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.909689437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.909666861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWINGTONCT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.90967017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWINGTONCT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.909663337Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARWICKRI, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.909632382Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHARONMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.909613937Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHARONMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.909607503Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTGREENWICHRI, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.909546919Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEABODYMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.90952502Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEABODYMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.909517579Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.909402934Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PUEBLOCO, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.909459719Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PUEBLOCO, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.909451401Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.909345553Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARLINGTONMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.909330448Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOURNEMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.909304198Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNC, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.909234462Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NATICKMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.90922049Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.909265843Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.909250759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MADISONCT, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.909151256Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.909171427Z caller=remote_instance_store.go:51 user=618621 slug=sendamatic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEEDHAMMA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.909096743Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MTPLEASANTWI, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.909048596Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FARMINGTONCT, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.909035148Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PROVINCETOWNMA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.909015255Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.909034534Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DANVERSMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.908963437Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARRENRI, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.908941485Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHKINGSTOWNRI, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.908923268Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROMEOVILLEIL, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.90890264Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRAFTONMA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.908863523Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.908866297Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.908818763Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYIN, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.908786565Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYIN, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.90877759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DAHLONEGAGA, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.908758928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ORANGECT, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.908733672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NARRAGANSETTRI, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.908694796Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, 
path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.90866917Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHATTLEBOROUGHMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.90866211Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.908652264Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MEDINAMN, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.90863759Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.908570354Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BERLINMA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.908605833Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UT-DABS, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.908540998Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PORTAGECOUNTYOH, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.908514618Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAREHAMMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.90848215Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.908477453Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MENTOROH, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.908451597Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ELIZABETHCITYNC, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.908408228Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.908348666Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWSHOREHAMRI, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.908384409Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.908335895Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=537072 slug=devbitvavo instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.908211627Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTERLYRI, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.908253636Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTERLYRI, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.908243137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.908248413Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.908227898Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHKINGSTOWNRI, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.908167197Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.908009643Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DARTMOUTHMA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.908109328Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFWARRENTON, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.908063329Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STONINGTONCT, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.908044804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RAYNHAMMA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.908025284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.908077966Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.908021879Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WYTHEVILLEVA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.907944767Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.907930438Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PORTSMOUTHNH, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.907919556Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.90789999Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.907901427Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NAUGATUCKCT, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.907861045Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.907628219Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TUOLUMNECOUNTYCA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.907797135Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURLINGTONMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.907785141Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURLINGTONMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.907778931Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.907773773Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.242, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s405, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.90777914Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TYNGSBOROUGHMA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.907759298Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.907687269Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SALINANY, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.907715271Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=460915 slug=funrise t=2024-05-29T13:44:14.907661321Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SALEMMA, 
resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.907601338Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=460915 slug=funrise t=2024-05-29T13:44:14.907591653Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=460915 slug=funrise version=24 fingerprint=32f4f97ff971ee30 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.90750693Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=node-exporter-ev-worker-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=node-exporter-ev-worker-3 Value:0xc0375e0780} C:{Var:C Labels:instance=node-exporter-ev-worker-3 Value:0xc0375e07d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.907205046s EvaluationString:[ var='B' labels={instance=node-exporter-ev-worker-3} value=69.62831410268119 ], [ var='C' labels={instance=node-exporter-ev-worker-3} value=0 ]}]" duration=18.934862ms + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UPPERARLINGTONOH, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.90755699Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CECILCOUNTYMD, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.907534881Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/JOHNSTONRI, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.907503281Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.162, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.907380469Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=707607 slug=obi t=2024-05-29T13:44:14.907326659Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.162, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s404, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.907364966Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.907344664Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.907331882Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the 
rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.scheduler user=707607 slug=obi version=32 fingerprint=becfffd7c13c4794 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.907232398Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Available:{Var:Available Labels: Value:0xc09a534970} Difference:{Var:Difference Labels: Value:0xc09a534978} lastAvailableIsBelow1:{Var:lastAvailableIsBelow1 Labels: Value:0xc09a5349a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.906930954s EvaluationString:[ var='Available' labels={} value=7 ], [ var='Difference' labels={} value=0 ], [ var='lastAvailableIsBelow1' labels={} value=0 ]}]" duration=27.167884ms + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.907286629Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GILPINCOUNTYCO, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.907307026Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LINCOLNRI, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.907276245Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEMONTIL, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.907253072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=beta-play-kong-internal-rds-20231124102235593700000001" t=2024-05-29T13:44:14.907254854Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.907204413Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CORTLANDTNY, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.907221305Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CORTLANDTNY, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.907210931Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.162, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=athens-s404, server_type=1G, service_name=cpz_vpn" 
t=2024-05-29T13:44:14.907189193Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.907149774Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAYLANDMA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.907102981Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Astana, cluster=Astana, country=Kazakhstan, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=62.133.47.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=astana-s401, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.907000362Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.906968667Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CRANSTONRI, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.906972255Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RIDGEFIELDCT, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.906940091Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WALPOLEMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.906874274Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.9066775Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.90667771Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WALPOLEMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.906862734Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.906759296Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWFAIRFIELDCT, 
resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.906807598Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWFAIRFIELDCT, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.906796884Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.906538582Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARYIN, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.906583265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNSHIPPENNFORESTCARBONPA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.906553095Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WELLESLEYMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.906485635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WELLESLEYMA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.90647489Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Astana, cluster=Astana, country=Kazakhstan, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=62.133.47.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=astana-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.906353855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BLACKSBURGVA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.906394812Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTVERNONNY, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.90631592Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEXINGTONMA, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.906294038Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GAHANNAOH, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.906260748Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARLBOROUGHMA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.906223828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STRATFORDCT, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.90616771Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFESSEXMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.906133213Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.906096725Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFESSEXMA, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.906123725Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/REDDINGCT, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.906086131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBEDFORDMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.905987493Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBEDFORDMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.905977232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Andorra, cluster=Andorra-2, country=Andorra, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=173.239.217.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=andorra-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.905958637Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Andorra, cluster=Andorra-2, country=Andorra, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=173.239.217.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=andorra-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.905943927Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CLAREMONTNH, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.905919421Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/COUNTYOFWILSONNC, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.905797687Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.905747967Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/COUNTYOFWILSONNC, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.905787878Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BALTIMOREMDDHCD, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.905681165Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURLINGTONVT, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.905603862Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/COUNTYOFNASHNC, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.905570101Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.905551111Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.905510639Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.905523303Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.905494019Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Andorra, cluster=Andorra-2, country=Andorra, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=173.239.217.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=andorra-s406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.905488544Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Andorra, cluster=Andorra-2, country=Andorra, datacenter=GSL, environment=production, instance=10.0.0.203:9998, ip=173.239.217.15, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=andorra-s406, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.905466793Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.905427737Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PLAINFIELDIL, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.905476221Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.90534286Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHANDOVERMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.905375646Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHANDOVERMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.905365855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.905320252Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.905299017Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=9afe0adab03f1648 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.905266976Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=5.484434ms + level=error ts=2024-05-29T13:44:14.90523331Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LITTLETONMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.90527933Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 
slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam-2, country=Netherlands, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.47.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=amsterdam-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.905220504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/READINGMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.905132198Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/READINGMA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.905121992Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/VILLAGEOFBARTLETTIL, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.905060861Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/AUSTELLGA, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.90500191Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/YORKPA, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.904969563Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MIDDLETOWNRI, resourceName=ECHO-POOL" t=2024-05-29T13:44:14.904938501Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam-2, country=Netherlands, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=84.17.47.91, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s409, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.904918573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/COCOABEACHFL, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.904923212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov 
instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ABINGTONPA, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.904861243Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRISTOLCT, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.904717268Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CRANBERRYTOWNSHIPPA, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.904671797Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.90463091Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/JACKSONTN, resourceName=GOLF-POOL" t=2024-05-29T13:44:14.90462204Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PRAIRIEVILLAGEKS, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.904577931Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PRAIRIEVILLAGEKS, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.904561356Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FALLRIVERMA, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.904540183Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STAMFORDCT, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.904499895Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.904435412Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CUMBERLANDRI, resourceName=FOXTROT-POOL" t=2024-05-29T13:44:14.904442317Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHATHAMCOUNTYNC, resourceName=CHARLIE-POOL" t=2024-05-29T13:44:14.904421621Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/AGAWAMMA, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.904317515Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SALEMNH, resourceName=BRAVO-POOL" t=2024-05-29T13:44:14.904212073Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BEVERLYMA, resourceName=ALPHA-POOL" t=2024-05-29T13:44:14.904143169Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance="databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTPLEASANTNY, resourceName=DELTA-POOL" t=2024-05-29T13:44:14.904103212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=195.78.54.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s407, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.904053367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.904006786Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn"

[Truncated: Grafana `ngalert.scheduler` debug output. The following "Alert rule evaluated" entry (user=396586, slug=opengov, version=147, fingerprint=c6fba625cc7ad59e, attempt=1) reports one result per Azure SQL elastic-pool database under subscription 71E10AC9-A8A2-48AA-8DB6-496B91FA7449. The dozens of per-database results all share the same structure — Instance labels (`databaseresourceid`, `resourceName`), State:Normal, and values for vars B and C — so only one representative instance is kept here.]

logger=ngalert.scheduler user=396586 slug=opengov version=147 fingerprint=c6fba625cc7ad59e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.895003745Z level=debug msg="Alert rule evaluated" results="[{Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTPLEASANTNY, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTPLEASANTNY, resourceName=DELTA-POOL Value:0xc01c6e8da0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTPLEASANTNY, resourceName=DELTA-POOL Value:0xc01c6e8da8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878756805s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTPLEASANTNY, resourceName=DELTA-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTPLEASANTNY, resourceName=DELTA-POOL} value=0 ]} ...]"
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHAPELHILLNC, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFESSEXMA, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFESSEXMA, resourceName=CHARLIE-POOL Value:0xc0134988b0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFESSEXMA, resourceName=CHARLIE-POOL Value:0xc0134987d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87895515s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFESSEXMA, resourceName=CHARLIE-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFESSEXMA, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STRATFORDCT, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STRATFORDCT, resourceName=GOLF-POOL Value:0xc013498968} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STRATFORDCT, resourceName=GOLF-POOL Value:0xc013498960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878957792s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STRATFORDCT, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STRATFORDCT, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARLBOROUGHMA, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARLBOROUGHMA, resourceName=FOXTROT-POOL Value:0xc0134989e8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARLBOROUGHMA, resourceName=FOXTROT-POOL Value:0xc013498a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878961245s EvaluationString:[ 
var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARLBOROUGHMA, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARLBOROUGHMA, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GAHANNAOH, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GAHANNAOH, resourceName=ALPHA-POOL Value:0xc013498b10} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GAHANNAOH, resourceName=ALPHA-POOL Value:0xc013498b18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878964994s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GAHANNAOH, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GAHANNAOH, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEXINGTONMA, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEXINGTONMA, resourceName=BRAVO-POOL Value:0xc013498c60} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEXINGTONMA, resourceName=BRAVO-POOL Value:0xc013498c68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878967412s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEXINGTONMA, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEXINGTONMA, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTVERNONNY, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTVERNONNY, resourceName=DELTA-POOL Value:0xc013498da0} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTVERNONNY, resourceName=DELTA-POOL Value:0xc013498da8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878969822s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTVERNONNY, resourceName=DELTA-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MOUNTVERNONNY, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBRITAINCT, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBRITAINCT, resourceName=BRAVO-POOL Value:0xc013498ef0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBRITAINCT, resourceName=BRAVO-POOL Value:0xc013498ef8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878972499s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBRITAINCT, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBRITAINCT, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BLACKSBURGVA, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BLACKSBURGVA, resourceName=BRAVO-POOL Value:0xc013498fd0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BLACKSBURGVA, resourceName=BRAVO-POOL Value:0xc013498fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878976345s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BLACKSBURGVA, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BLACKSBURGVA, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHSMITHFIELDRI, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHSMITHFIELDRI, resourceName=DELTA-POOL Value:0xc013499060} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHSMITHFIELDRI, resourceName=DELTA-POOL Value:0xc013499068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878980367s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHSMITHFIELDRI, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHSMITHFIELDRI, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EVERETTMA, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EVERETTMA, resourceName=ECHO-POOL Value:0xc0134990d0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EVERETTMA, resourceName=ECHO-POOL Value:0xc0134990d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878984565s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EVERETTMA, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EVERETTMA, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WELLESLEYMA, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WELLESLEYMA, resourceName=GOLF-POOL Value:0xc013499158} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WELLESLEYMA, resourceName=GOLF-POOL Value:0xc013499150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87898706s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WELLESLEYMA, resourceName=GOLF-POOL} value=2 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WELLESLEYMA, resourceName=GOLF-POOL} value=0 ]} 
{Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GREENSBURGPA, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GREENSBURGPA, resourceName=ALPHA-POOL Value:0xc0134991f0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GREENSBURGPA, resourceName=ALPHA-POOL Value:0xc0134991f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878989344s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GREENSBURGPA, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GREENSBURGPA, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNSHIPPENNFORESTCARBONPA, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNSHIPPENNFORESTCARBONPA, resourceName=DELTA-POOL Value:0xc013499320} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNSHIPPENNFORESTCARBONPA, resourceName=DELTA-POOL Value:0xc013499328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878991501s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNSHIPPENNFORESTCARBONPA, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNSHIPPENNFORESTCARBONPA, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARYIN, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARYIN, resourceName=ALPHA-POOL Value:0xc013499490} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARYIN, resourceName=ALPHA-POOL Value:0xc013499498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87899521s EvaluationString:[ var='B' 
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARYIN, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARYIN, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DAVIECOUNTYNC, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DAVIECOUNTYNC, resourceName=FOXTROT-POOL Value:0xc013499588} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DAVIECOUNTYNC, resourceName=FOXTROT-POOL Value:0xc0134996c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.878997197s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DAVIECOUNTYNC, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DAVIECOUNTYNC, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAKEFIELDMA, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAKEFIELDMA, resourceName=ALPHA-POOL Value:0xc013499720} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAKEFIELDMA, resourceName=ALPHA-POOL Value:0xc013499728}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87900027s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAKEFIELDMA, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAKEFIELDMA, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTALLISWI-STAGING, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTALLISWI-STAGING, resourceName=FOXTROT-POOL Value:0xc0134997e8} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTALLISWI-STAGING, resourceName=FOXTROT-POOL Value:0xc013499860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879003155s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTALLISWI-STAGING, resourceName=FOXTROT-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTALLISWI-STAGING, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURNSVILLEMN, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURNSVILLEMN, resourceName=FOXTROT-POOL Value:0xc013499910} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURNSVILLEMN, resourceName=FOXTROT-POOL Value:0xc013499918}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879006092s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURNSVILLEMN, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BURNSVILLEMN, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EFFINGHAMCOUNTYGA, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EFFINGHAMCOUNTYGA, resourceName=CHARLIE-POOL Value:0xc0134999c8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EFFINGHAMCOUNTYGA, resourceName=CHARLIE-POOL Value:0xc013499a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879009315s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EFFINGHAMCOUNTYGA, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EFFINGHAMCOUNTYGA, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UNIONCOUNTYOH, 
resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UNIONCOUNTYOH, resourceName=ECHO-POOL Value:0xc013499aa0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UNIONCOUNTYOH, resourceName=ECHO-POOL Value:0xc013499aa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87901196s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UNIONCOUNTYOH, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/UNIONCOUNTYOH, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWFAIRFIELDCT, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWFAIRFIELDCT, resourceName=CHARLIE-POOL Value:0xc013499b38} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWFAIRFIELDCT, resourceName=CHARLIE-POOL Value:0xc013499bb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87901556s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWFAIRFIELDCT, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWFAIRFIELDCT, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHATTANOOGATREASURER, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHATTANOOGATREASURER, resourceName=DELTA-POOL Value:0xc013499c38} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHATTANOOGATREASURER, resourceName=DELTA-POOL Value:0xc013499c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879018452s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHATTANOOGATREASURER, resourceName=DELTA-POOL} value=0 ], [ var='C' 
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHATTANOOGATREASURER, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WALPOLEMA, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WALPOLEMA, resourceName=ECHO-POOL Value:0xc013499ca0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WALPOLEMA, resourceName=ECHO-POOL Value:0xc013499ca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87902361s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WALPOLEMA, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WALPOLEMA, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RICHMONDCOUNTYVA, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RICHMONDCOUNTYVA, resourceName=FOXTROT-POOL Value:0xc013499cf8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RICHMONDCOUNTYVA, resourceName=FOXTROT-POOL Value:0xc013499d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879026468s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RICHMONDCOUNTYVA, resourceName=FOXTROT-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RICHMONDCOUNTYVA, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RIDGEFIELDCT, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RIDGEFIELDCT, resourceName=ECHO-POOL Value:0xc013499db0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RIDGEFIELDCT, resourceName=ECHO-POOL Value:0xc013499db8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879030294s 
EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RIDGEFIELDCT, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RIDGEFIELDCT, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CRANSTONRI, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CRANSTONRI, resourceName=BRAVO-POOL Value:0xc013499e50} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CRANSTONRI, resourceName=BRAVO-POOL Value:0xc013499e58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879033822s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CRANSTONRI, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CRANSTONRI, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTRIDGETN, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTRIDGETN, resourceName=FOXTROT-POOL Value:0xc013499ee8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTRIDGETN, resourceName=FOXTROT-POOL Value:0xc013499f70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879036909s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTRIDGETN, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTRIDGETN, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAKEVILLEMA, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAKEVILLEMA, resourceName=ECHO-POOL Value:0xc013499fc0} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAKEVILLEMA, resourceName=ECHO-POOL Value:0xc013499fc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879040577s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAKEVILLEMA, resourceName=ECHO-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAKEVILLEMA, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HARWICHMA, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HARWICHMA, resourceName=CHARLIE-POOL Value:0xc04581e018} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HARWICHMA, resourceName=CHARLIE-POOL Value:0xc04581e070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879044108s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HARWICHMA, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HARWICHMA, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAYLANDMA, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAYLANDMA, resourceName=FOXTROT-POOL Value:0xc04581e0d0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAYLANDMA, resourceName=FOXTROT-POOL Value:0xc04581e0d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879047083s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAYLANDMA, resourceName=FOXTROT-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WAYLANDMA, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DARIENCT, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DARIENCT, resourceName=ECHO-POOL Value:0xc04581e300} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DARIENCT, resourceName=ECHO-POOL Value:0xc04581e308}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879053527s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DARIENCT, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DARIENCT, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RHODEISLAND, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RHODEISLAND, resourceName=ECHO-POOL Value:0xc04581e5f0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RHODEISLAND, resourceName=ECHO-POOL Value:0xc04581e5f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879056117s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RHODEISLAND, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RHODEISLAND, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CORTLANDTNY, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CORTLANDTNY, resourceName=DELTA-POOL Value:0xc04581ed10} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CORTLANDTNY, resourceName=DELTA-POOL Value:0xc04581ed18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879058464s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CORTLANDTNY, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CORTLANDTNY, resourceName=DELTA-POOL} value=0 ]} 
{Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEMONTIL, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEMONTIL, resourceName=ALPHA-POOL Value:0xc04581f150} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEMONTIL, resourceName=ALPHA-POOL Value:0xc04581f158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879060799s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEMONTIL, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LEMONTIL, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LINCOLNRI, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LINCOLNRI, resourceName=CHARLIE-POOL Value:0xc04581f5b0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LINCOLNRI, resourceName=CHARLIE-POOL Value:0xc04581f528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879064267s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LINCOLNRI, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LINCOLNRI, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GILPINCOUNTYCO, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GILPINCOUNTYCO, resourceName=ALPHA-POOL Value:0xc04581f890} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GILPINCOUNTYCO, resourceName=ALPHA-POOL Value:0xc04581f898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879068015s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GILPINCOUNTYCO, resourceName=ALPHA-POOL} value=0 ], [ var='C' 
Truncated excerpt of Grafana alert state-manager output for a single evaluation pass (`EvaluatedAt: 2024-05-29 13:44:10 +0000 UTC`, `EvaluationDuration` ≈ 4.88 s). Every instance targets an Azure SQL database on server `HDNYSXCX5F` (subscription `71E10AC9-A8A2-48AA-8DB6-496B91FA7449`, resource group `DEFAULT-SQL-EASTUS`), reports `State: Normal` with empty `Error` and `Results`, and carries two reduced query values, `B` and `C`, labeled by `databaseresourceid` and `resourceName` (the elastic pool). `C` is 0 for every instance in the excerpt. The per-database values, in order of appearance:

| Database | Pool | B | C |
|---|---|---|---|
| GILPINCOUNTYCO | ALPHA-POOL | – | 0 |
| BEXLEYOH | ECHO-POOL | 0 | 0 |
| HILLIARDOH | ECHO-POOL | 1 | 0 |
| SPRINGFIELDDELCO | DELTA-POOL | 1 | 0 |
| GLASTONBURYCT | FOXTROT-POOL | 1 | 0 |
| JOHNSTONRI | ALPHA-POOL | 0 | 0 |
| CECILCOUNTYMD | ALPHA-POOL | 1 | 0 |
| UPPERARLINGTONOH | ECHO-POOL | 1 | 0 |
| SALEMMA | ALPHA-POOL | 1 | 0 |
| PAWTUCKETRI | FOXTROT-POOL | 0 | 0 |
| NEWMILFORDCT | ECHO-POOL | 1 | 0 |
| DUXBURYMA | CHARLIE-POOL | 0 | 0 |
| SALINANY | DELTA-POOL | 1 | 0 |
| ithacany | CHARLIE-POOL | 1 | 0 |
| TYNGSBOROUGHMA | FOXTROT-POOL | 0 | 0 |
| BURLINGTONMA | ECHO-POOL | 1 | 0 |
| TUOLUMNECOUNTYCA | GOLF-POOL | 1 | 0 |
| HAVERHILLMA | ALPHA-POOL | 1 | 0 |
| OLDLYMECT | CHARLIE-POOL | 1 | 0 |
| NAUGATUCKCT | ALPHA-POOL | 1 | 0 |
| MARSHFIELDMA | BRAVO-POOL | 0 | 0 |
| TORRINGTONCT | CHARLIE-POOL | 1 | 0 |
| PORTSMOUTHNH | BRAVO-POOL | 1 | 0 |
| WYTHEVILLEVA | FOXTROT-POOL | 0 | 0 |
| LISLEIL | ALPHA-POOL | 1 | 0 |
| NORTHCANTONOH | BRAVO-POOL | 1 | 0 |
| DECATURIL | ALPHA-POOL | 1 | 0 |
| RAYNHAMMA | BRAVO-POOL | 0 | 0 |
| STONINGTONCT | ALPHA-POOL | 1 | 0 |
| TOWNOFWARRENTON | ALPHA-POOL | 1 | 0 |
| DESERTHOTSPRINGSCA | ECHO-POOL | 0 | 0 |
| DARTMOUTHMA | DELTA-POOL | 2 | 0 |
| FITCHBURGMA | GOLF-POOL | 1 | 0 |
| SOUTHKINGSTOWNRI | ECHO-POOL | 1 | 0 |
| VESTAVIAHILLSAL | FOXTROT-POOL | 1 | 0 |
| WESTERLYRI | GOLF-POOL | 1 | 0 |
| DRACUTMA | FOXTROT-POOL | 1 | 0 |
| FRANKLINCOUNTYNC | GOLF-POOL | 1 | 0 |
| QUINCYMA | FOXTROT-POOL | 2 | 0 |
| NEWSHOREHAMRI | ALPHA-POOL | 0 | 0 |
| ELIZABETHCITYNC | GOLF-POOL | 1 | 0 |
| MENTOROH | BRAVO-POOL | 0 | 0 |
| WAREHAMMA | CHARLIE-POOL | 1 | 0 |
| PORTAGECOUNTYOH | ECHO-POOL | 1 | 0 |
| UT-DABS | CHARLIE-POOL | 1 | 0 |
| VALLEYSTREAMNY | BRAVO-POOL | 1 | 0 |
| BERLINMA | FOXTROT-POOL | 1 | 0 |
| MEDINAMN | BRAVO-POOL | 1 | 0 |
| NORTHATTLEBOROUGHMA | DELTA-POOL | 1 | 0 |
| NARRAGANSETTRI | ECHO-POOL | 1 | 0 |
| ORANGECT | ALPHA-POOL | 1 | 0 |
| DAHLONEGAGA | ALPHA-POOL | 0 | 0 |
| MONROECOUNTYIN | ALPHA-POOL | 1 | 0 |
| WESTWOODMA | ECHO-POOL | 0 | 0 |
| WINONACOUNTYMN | DELTA-POOL | 0 | 0 |
| GRAFTONMA | FOXTROT-POOL | 1 | 0 |
| WOODBURYCT | CHARLIE-POOL | 1 | – |

The `B` value for GILPINCOUNTYCO and the `C` value for WOODBURYCT fall outside the boundaries of the truncated excerpt and are shown as `–`. The ithacany instance is the one entry whose resource ID keeps its original mixed-case Azure path (`/Subscriptions/.../databases/ithacany`) rather than the upper-cased form used by all other instances.
[ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WOODBURYCT, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROMEOVILLEIL, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROMEOVILLEIL, resourceName=DELTA-POOL Value:0xc0291939a0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROMEOVILLEIL, resourceName=DELTA-POOL Value:0xc0291939a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879239097s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROMEOVILLEIL, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROMEOVILLEIL, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHKINGSTOWNRI, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHKINGSTOWNRI, resourceName=ALPHA-POOL Value:0xc029193a10} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHKINGSTOWNRI, resourceName=ALPHA-POOL Value:0xc029193a18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87924222s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHKINGSTOWNRI, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHKINGSTOWNRI, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARRENRI, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARRENRI, resourceName=BRAVO-POOL Value:0xc029193aa8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARRENRI, resourceName=BRAVO-POOL Value:0xc029193aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879244742s 
EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARRENRI, resourceName=BRAVO-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARRENRI, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DANVERSMA, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DANVERSMA, resourceName=CHARLIE-POOL Value:0xc029193b08} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DANVERSMA, resourceName=CHARLIE-POOL Value:0xc029193b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87924739s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DANVERSMA, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DANVERSMA, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARDNERMA, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARDNERMA, resourceName=ALPHA-POOL Value:0xc029193ba0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARDNERMA, resourceName=ALPHA-POOL Value:0xc029193ba8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879249954s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARDNERMA, resourceName=ALPHA-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GARDNERMA, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PROVINCETOWNMA, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PROVINCETOWNMA, resourceName=FOXTROT-POOL Value:0xc029193c08} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PROVINCETOWNMA, resourceName=FOXTROT-POOL Value:0xc029193c60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879252035s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PROVINCETOWNMA, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PROVINCETOWNMA, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FARMINGTONCT, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FARMINGTONCT, resourceName=FOXTROT-POOL Value:0xc029193cc8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FARMINGTONCT, resourceName=FOXTROT-POOL Value:0xc029193cc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879255304s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FARMINGTONCT, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FARMINGTONCT, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MTPLEASANTWI, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MTPLEASANTWI, resourceName=ALPHA-POOL Value:0xc029193d40} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MTPLEASANTWI, resourceName=ALPHA-POOL Value:0xc029193d48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879257485s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MTPLEASANTWI, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MTPLEASANTWI, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTPROVIDENCERI, resourceName=ECHO-POOL State:Normal Error: Results:map[] 
Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTPROVIDENCERI, resourceName=ECHO-POOL Value:0xc029193dc0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTPROVIDENCERI, resourceName=ECHO-POOL Value:0xc029193dc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879260956s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTPROVIDENCERI, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTPROVIDENCERI, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEEDHAMMA, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEEDHAMMA, resourceName=FOXTROT-POOL Value:0xc029193e28} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEEDHAMMA, resourceName=FOXTROT-POOL Value:0xc029193e70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879263215s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEEDHAMMA, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEEDHAMMA, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RI-CRLB, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RI-CRLB, resourceName=ALPHA-POOL Value:0xc029193f10} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RI-CRLB, resourceName=ALPHA-POOL Value:0xc029193f18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879265232s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RI-CRLB, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/RI-CRLB, resourceName=ALPHA-POOL} value=0 ]} 
{Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/OTSEGONY, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/OTSEGONY, resourceName=BRAVO-POOL Value:0xc029193fa8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/OTSEGONY, resourceName=BRAVO-POOL Value:0xc029193fa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879267394s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/OTSEGONY, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/OTSEGONY, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MADISONCT, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MADISONCT, resourceName=BRAVO-POOL Value:0xc0068a8080} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MADISONCT, resourceName=BRAVO-POOL Value:0xc0068a8088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879269602s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MADISONCT, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MADISONCT, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMYRNAGA, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMYRNAGA, resourceName=GOLF-POOL Value:0xc0068a8640} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMYRNAGA, resourceName=GOLF-POOL Value:0xc0068a8648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87927246s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMYRNAGA, resourceName=GOLF-POOL} value=0 ], [ var='C' 
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMYRNAGA, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NATICKMA, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NATICKMA, resourceName=GOLF-POOL Value:0xc0068a87c0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NATICKMA, resourceName=GOLF-POOL Value:0xc0068a87c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879274522s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NATICKMA, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NATICKMA, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNC, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNC, resourceName=CHARLIE-POOL Value:0xc0068a88e8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNC, resourceName=CHARLIE-POOL Value:0xc0068a8a10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879277411s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNC, resourceName=CHARLIE-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHPORTNC, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHBOROUGHMA, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHBOROUGHMA, resourceName=ECHO-POOL Value:0xc0068a8ce0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHBOROUGHMA, resourceName=ECHO-POOL Value:0xc0068a8ce8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87927961s EvaluationString:[ var='B' 
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHBOROUGHMA, resourceName=ECHO-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SOUTHBOROUGHMA, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHESHIRECT, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHESHIRECT, resourceName=ECHO-POOL Value:0xc0068a8f00} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHESHIRECT, resourceName=ECHO-POOL Value:0xc0068a8f08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879282689s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHESHIRECT, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CHESHIRECT, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOURNEMA, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOURNEMA, resourceName=DELTA-POOL Value:0xc0068a9160} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOURNEMA, resourceName=DELTA-POOL Value:0xc0068a9168}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879285932s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOURNEMA, resourceName=DELTA-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BOURNEMA, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARLINGTONMA, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARLINGTONMA, resourceName=ECHO-POOL Value:0xc0068a91e0} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARLINGTONMA, resourceName=ECHO-POOL Value:0xc0068a91e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879288059s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARLINGTONMA, resourceName=ECHO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ARLINGTONMA, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BEDFORDTX, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BEDFORDTX, resourceName=DELTA-POOL Value:0xc0068a92b0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BEDFORDTX, resourceName=DELTA-POOL Value:0xc0068a92b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879290835s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BEDFORDTX, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BEDFORDTX, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCHESTERNH, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCHESTERNH, resourceName=BRAVO-POOL Value:0xc0068a9370} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCHESTERNH, resourceName=BRAVO-POOL Value:0xc0068a9378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879293008s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCHESTERNH, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ROCHESTERNH, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WATERTOWNSD, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WATERTOWNSD, resourceName=DELTA-POOL Value:0xc0068a93f8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WATERTOWNSD, resourceName=DELTA-POOL Value:0xc0068a93f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879295007s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WATERTOWNSD, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WATERTOWNSD, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRANDISLAND, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRANDISLAND, resourceName=CHARLIE-POOL Value:0xc0068a9498} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRANDISLAND, resourceName=CHARLIE-POOL Value:0xc0068a94e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879301654s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRANDISLAND, resourceName=CHARLIE-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GRANDISLAND, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMITHFIELDRI, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMITHFIELDRI, resourceName=BRAVO-POOL Value:0xc0068a9570} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMITHFIELDRI, resourceName=BRAVO-POOL Value:0xc0068a9578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879303967s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMITHFIELDRI, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SMITHFIELDRI, resourceName=BRAVO-POOL} value=0 ]} 
{Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PUEBLOCO, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PUEBLOCO, resourceName=CHARLIE-POOL Value:0xc0068a9610} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PUEBLOCO, resourceName=CHARLIE-POOL Value:0xc0068a95c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879306863s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PUEBLOCO, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PUEBLOCO, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINDSORCT, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINDSORCT, resourceName=DELTA-POOL Value:0xc0068a96c8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINDSORCT, resourceName=DELTA-POOL Value:0xc0068a96c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879309297s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINDSORCT, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WINDSORCT, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAUMELLEAR, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAUMELLEAR, resourceName=GOLF-POOL Value:0xc0068a9730} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAUMELLEAR, resourceName=GOLF-POOL Value:0xc0068a9738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879312205s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAUMELLEAR, resourceName=GOLF-POOL} value=0 ], [ var='C' 
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAUMELLEAR, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEABODYMA, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEABODYMA, resourceName=GOLF-POOL Value:0xc0068a9808} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEABODYMA, resourceName=GOLF-POOL Value:0xc0068a9800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87931547s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEABODYMA, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/PEABODYMA, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTGREENWICHRI, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTGREENWICHRI, resourceName=GOLF-POOL Value:0xc0068a99b0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTGREENWICHRI, resourceName=GOLF-POOL Value:0xc0068a99b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879318543s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTGREENWICHRI, resourceName=GOLF-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/EASTGREENWICHRI, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWPORTRI, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWPORTRI, resourceName=GOLF-POOL Value:0xc0068a9a70} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWPORTRI, resourceName=GOLF-POOL Value:0xc0068a9a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879320814s EvaluationString:[ var='B' 
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWPORTRI, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWPORTRI, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHBOROUGHMA, resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHBOROUGHMA, resourceName=ECHO-POOL Value:0xc0068a9b40} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHBOROUGHMA, resourceName=ECHO-POOL Value:0xc0068a9b48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879323179s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHBOROUGHMA, resourceName=ECHO-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHBOROUGHMA, resourceName=ECHO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHARONMA, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHARONMA, resourceName=DELTA-POOL Value:0xc0068a9ca0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHARONMA, resourceName=DELTA-POOL Value:0xc0068a9ca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879325278s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHARONMA, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHARONMA, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARWICKRI, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARWICKRI, resourceName=GOLF-POOL Value:0xc0068a9e78} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARWICKRI, resourceName=GOLF-POOL Value:0xc0068a9e70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879327478s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARWICKRI, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WARWICKRI, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHERIDANWY, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHERIDANWY, resourceName=CHARLIE-POOL Value:0xc0068a9fe8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHERIDANWY, resourceName=CHARLIE-POOL Value:0xc027d7c040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879330024s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHERIDANWY, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/SHERIDANWY, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWINGTONCT, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWINGTONCT, resourceName=CHARLIE-POOL Value:0xc027d7c090} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWINGTONCT, resourceName=CHARLIE-POOL Value:0xc027d7c098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879332567s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWINGTONCT, resourceName=CHARLIE-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWINGTONCT, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NYECOUNTYNV, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NYECOUNTYNV, resourceName=DELTA-POOL Value:0xc027d7c100} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NYECOUNTYNV, resourceName=DELTA-POOL Value:0xc027d7c108}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879334758s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NYECOUNTYNV, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NYECOUNTYNV, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DETROITMI, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DETROITMI, resourceName=ALPHA-POOL Value:0xc027d7c178} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DETROITMI, resourceName=ALPHA-POOL Value:0xc027d7c170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879337405s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DETROITMI, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DETROITMI, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ANDERSONSC, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ANDERSONSC, resourceName=GOLF-POOL Value:0xc027d7c1e8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ANDERSONSC, resourceName=GOLF-POOL Value:0xc027d7c1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879339404s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ANDERSONSC, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/ANDERSONSC, resourceName=GOLF-POOL} value=0 ]} 
{Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARATHONFL, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARATHONFL, resourceName=FOXTROT-POOL Value:0xc027d7c248} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARATHONFL, resourceName=FOXTROT-POOL Value:0xc027d7c2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879341401s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARATHONFL, resourceName=FOXTROT-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MARATHONFL, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STOWOH, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STOWOH, resourceName=BRAVO-POOL Value:0xc027d7c310} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STOWOH, resourceName=BRAVO-POOL Value:0xc027d7c318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879344247s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STOWOH, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STOWOH, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HEMPSTEADNY-MIGRATION, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HEMPSTEADNY-MIGRATION, resourceName=ALPHA-POOL Value:0xc027d7c380} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HEMPSTEADNY-MIGRATION, resourceName=ALPHA-POOL Value:0xc027d7c388}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879347272s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HEMPSTEADNY-MIGRATION, resourceName=ALPHA-POOL} value=0 
], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/HEMPSTEADNY-MIGRATION, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STUARTFL, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STUARTFL, resourceName=BRAVO-POOL Value:0xc027d7c3f0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STUARTFL, resourceName=BRAVO-POOL Value:0xc027d7c3f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.87935066s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STUARTFL, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/STUARTFL, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHPROVIDENCERI, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHPROVIDENCERI, resourceName=ALPHA-POOL Value:0xc027d7c460} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHPROVIDENCERI, resourceName=ALPHA-POOL Value:0xc027d7c468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879353091s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHPROVIDENCERI, resourceName=ALPHA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NORTHPROVIDENCERI, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/APOPKAFL, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/APOPKAFL, resourceName=BRAVO-POOL Value:0xc027d7c4e0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/APOPKAFL, resourceName=BRAVO-POOL Value:0xc027d7c4e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879355954s 
EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/APOPKAFL, resourceName=BRAVO-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/APOPKAFL, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEERFIELDIL, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEERFIELDIL, resourceName=DELTA-POOL Value:0xc027d7c550} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEERFIELDIL, resourceName=DELTA-POOL Value:0xc027d7c558}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879358902s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEERFIELDIL, resourceName=DELTA-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/DEERFIELDIL, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAMARONECKNY, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAMARONECKNY, resourceName=GOLF-POOL Value:0xc027d7c5c8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAMARONECKNY, resourceName=GOLF-POOL Value:0xc027d7c5c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879361224s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAMARONECKNY, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MAMARONECKNY, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRIGHTONNY, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRIGHTONNY, resourceName=DELTA-POOL Value:0xc027d7c630} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRIGHTONNY, resourceName=DELTA-POOL Value:0xc027d7c638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879364201s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRIGHTONNY, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/BRIGHTONNY, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY, resourceName=FOXTROT-POOL Value:0xc027d7c688} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY, resourceName=FOXTROT-POOL Value:0xc027d7c6d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879366361s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY, resourceName=FOXTROT-POOL} value=2 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FRANKLINMA, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FRANKLINMA, resourceName=CHARLIE-POOL Value:0xc027d7c720} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FRANKLINMA, resourceName=CHARLIE-POOL Value:0xc027d7c728}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879369056s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FRANKLINMA, resourceName=CHARLIE-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/FRANKLINMA, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBURYPORTMA, resourceName=DELTA-POOL State:Normal Error: 
Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBURYPORTMA, resourceName=DELTA-POOL Value:0xc027d7c798} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBURYPORTMA, resourceName=DELTA-POOL Value:0xc027d7c790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879371713s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBURYPORTMA, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/NEWBURYPORTMA, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTMINSTERMA, resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTMINSTERMA, resourceName=BRAVO-POOL Value:0xc027d7c800} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTMINSTERMA, resourceName=BRAVO-POOL Value:0xc027d7c808}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879374154s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTMINSTERMA, resourceName=BRAVO-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/WESTMINSTERMA, resourceName=BRAVO-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GLOCESTERRI, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GLOCESTERRI, resourceName=DELTA-POOL Value:0xc027d7c878} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GLOCESTERRI, resourceName=DELTA-POOL Value:0xc027d7c870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879376203s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GLOCESTERRI, resourceName=DELTA-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/GLOCESTERRI, resourceName=DELTA-POOL} value=0 ]} 
{Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-MOVERECORDS, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-MOVERECORDS, resourceName=CHARLIE-POOL Value:0xc027d7c8c8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-MOVERECORDS, resourceName=CHARLIE-POOL Value:0xc027d7c910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879379174s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-MOVERECORDS, resourceName=CHARLIE-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-MOVERECORDS, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-REMAP, resourceName=CHARLIE-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-REMAP, resourceName=CHARLIE-POOL Value:0xc027d7c960} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-REMAP, resourceName=CHARLIE-POOL Value:0xc027d7c968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879382017s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-REMAP, resourceName=CHARLIE-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/CAMBRIDGEMA-REMAP, resourceName=CHARLIE-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYINNEWGISTEST, resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYINNEWGISTEST, resourceName=ALPHA-POOL Value:0xc027d7c9e0} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYINNEWGISTEST, resourceName=ALPHA-POOL Value:0xc027d7c9e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879384037s EvaluationString:[ var='B' 
labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYINNEWGISTEST, resourceName=ALPHA-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/MONROECOUNTYINNEWGISTEST, resourceName=ALPHA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/KINGSMOUNTAINNC, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/KINGSMOUNTAINNC, resourceName=GOLF-POOL Value:0xc027d7ca50} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/KINGSMOUNTAINNC, resourceName=GOLF-POOL Value:0xc027d7ca58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879387859s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/KINGSMOUNTAINNC, resourceName=GOLF-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/KINGSMOUNTAINNC, resourceName=GOLF-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY-TEST2, resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY-TEST2, resourceName=FOXTROT-POOL Value:0xc027d7caa8} C:{Var:C Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY-TEST2, resourceName=FOXTROT-POOL Value:0xc027d7cb00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879391014s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY-TEST2, resourceName=FOXTROT-POOL} value=0 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/TOWNOFHUNTINGTONNY-TEST2, resourceName=FOXTROT-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAPORTEIN, resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAPORTEIN, resourceName=DELTA-POOL Value:0xc027d7cc00} C:{Var:C 
Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAPORTEIN, resourceName=DELTA-POOL Value:0xc027d7cc08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.879394083s EvaluationString:[ var='B' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAPORTEIN, resourceName=DELTA-POOL} value=1 ], [ var='C' labels={databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/LAPORTEIN, resourceName=DELTA-POOL} value=0 ]} {Instance:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/VILLAGEOFBISCAYNEPARKFL, resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:databaseresourceid=/SUBSCRIPTIONS/71E10AC9-A8A2-48AA-8DB6-496B91FA7449/RESOURCEGROUPS/DEFAULT-SQL-EASTUS/PROVIDERS/MICROSOFT.SQL/SERVERS/HDNYSXCX5F/DATABASES/VILLAGEOFBISCAYNEPARKFL, resourceName=GOLF-POOL Value:0xc027d7ce40} C:{Var:C Labels:databaseresourceid=/SUBSCRIP + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.903613669Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.903433135Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.168.4, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s408, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.903237219Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.903207906Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + Error parsing panelUID for alert annotationruleID2796dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.historian backend=loki user=698963 slug=lemonade t=2024-05-29T13:44:14.903120357Z level=debug msg="Done saving alert state history batch" + level=debug ts=2024-05-29T13:44:14.903118102Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.902913048Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=304032 slug=clearbanc t=2024-05-29T13:44:14.902960656Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.487585ms + level=debug ts=2024-05-29T13:44:14.903044685Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=82372 slug=fout t=2024-05-29T13:44:14.895582633Z level=debug msg="Saving alert states" count=19 max_state_save_concurrency=1 + logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=staging-rfp-aggregator1-0, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=staging-rfp-aggregator1-0, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=staging-rfp-all, resource.label.instance=staging-rfp-aggregator1-0:prometheus, resource.label.job=staging-rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.895545262Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.902914928Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-5, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-5, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-5:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.895284878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-2, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-2, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-2:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.895134483Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.168.2, is_collocated=false, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s405, server_type=10G, service_name=cpz_vpn" t=2024-05-29T13:44:14.902804661Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.902770138Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-17, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-17, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-17:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.895047213Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-17, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-17, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-17:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.895033694Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.902612502Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.902566242Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.902534371Z caller=remote_instance_store.go:51 user=23997 slug=wheniwork msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=23997 slug=wheniwork instance= t=2024-05-29T13:44:14.902460031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=23997 slug=wheniwork instance= t=2024-05-29T13:44:14.902452134Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=23997 slug=wheniwork instance= t=2024-05-29T13:44:14.902443171Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=23997 slug=wheniwork t=2024-05-29T13:44:14.902422263Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=23997 slug=wheniwork version=2 fingerprint=37f6cffadb816c19 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.902334551Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[D0:{Var:D Labels: Value:} D1:{Var:D Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.901973246s EvaluationString:[ var='D0' metric='NoData' labels={} value=null ], [ var='D1' metric='NoData' labels={} value=null ]}]" duration=128.841037ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.901837125Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.901691337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.901679209Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.901497084Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.901501592Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.901367337Z caller=remote_instance_store.go:51 user=373502 slug=stakeandrelax msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=373502 slug=stakeandrelax t=2024-05-29T13:44:14.901317653Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, 
datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn" t=2024-05-29T13:44:14.901422631Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.901344975Z caller=grafana.go:247 user=389502 slug=ciscoiot msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=4 alerts=0 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.901352628Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=cpz_vpn" + level=debug ts=2024-05-29T13:44:14.901122569Z caller=remote_instance_store.go:51 user=357638 slug=usepower msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=260796 slug=expressvpn version=51 fingerprint=9ac5f36af7b1529a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.779561053Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn Value:0xc022210620} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn Value:0xc022210760} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn Value:0xc0222108d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.524700859s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn} value=86.3896569521162 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, 
instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn} value=86.3896569521162 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn Value:0xc022210b50} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn Value:0xc022210c80} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn Value:0xc022210dc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.524734251s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn} value=95.79349954512018 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn} value=95.79349954512018 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, 
path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s402, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn Value:0xc022211048} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn Value:0xc0222111b0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn Value:0xc0222112f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.524751976s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn} value=62.387753587871124 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn} value=62.387753587871124 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn State:Normal 
Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn Value:0xc022211578} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn Value:0xc0222116c0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn Value:0xc0222117e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.524767475s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn} value=149.92777673660163 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn} value=149.92777673660163 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Algiers, cluster=Algiers, country=Algeria, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=176.125.228.145, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=algiers-s401, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn Value:0xc022211d08} B:{Var:B 
Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn Value:0xc022211a90} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn Value:0xc022211bc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.524794772s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn} value=61.39007527494214 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn} value=61.39007527494214 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn Value:0xc03d4db500} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, 
server=amsterdam-s406, server_type=10G, service_name=cpz_vpn Value:0xc022211f98} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn Value:0xc03d4db368}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.524812641s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn} value=74.86638314782813 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn} value=74.86638314782813 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.206.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=amsterdam-s406, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.168.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s405, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.168.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s405, server_type=10G, service_name=cpz_vpn Value:0xc03d4dbae0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.168.2, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=amsterdam-s405, server_type=10G, service_name=cpz_vpn Value:0xc03d4dbdd0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Amsterdam, cluster=Amsterdam, country=Netherlands, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=191.96.168.2, is_collocated=false, 
[Truncated log dump: raw Grafana alert-rule evaluation state output for the `certificate_days_left` metric, emitted by `job=vpn-infrastructure-monitoring-exporter` and repeated once per monitored certificate (`fullchain.pem` and `openvpn.crt`) on each VPN server across the Amsterdam, Andorra, Astana, and Athens clusters. Every entry carries the full instance label set (brand, city, cluster, country, datacenter, ip, server, server_type, service_name), the query/expression values A and B (days of certificate validity remaining) and the condition value C (0 in all entries, consistent with `State:Normal`), plus `EvaluatedAt:2024-05-29 13:44:10 +0000 UTC` and an `EvaluationDuration` of roughly 4.52s.]
access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn} value=28.39015235028862 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn} value=28.39015235028862 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn Value:0xc01f39fd30} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn Value:0xc01f39fa60} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn Value:0xc01f39fbc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525245606s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn} value=39.84360142623102 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, 
instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn} value=39.84360142623102 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Athens, cluster=Athens, country=Greece, datacenter=ESTNOC, environment=production, instance=10.0.0.203:9998, ip=185.51.134.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=athens-s406, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn Value:0xc03a23c608} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn Value:0xc03a23c7e8} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn Value:0xc03a23c3b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525265253s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn} value=65.39186646936382 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn} value=65.39186646936382 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, 
ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn Value:0xc03a23cd58} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn Value:0xc03a23d040} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn Value:0xc03a23d480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.52527709s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn} value=74.65157711859838 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn} value=74.65157711859838 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=185.152.66.225, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s404, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn Value:0xc03a23db10} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn Value:0xc03a23de88} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn Value:0xc0157660c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525293093s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn} value=25.391467048708588 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn} value=25.391467048708588 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, 
is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn Value:0xc0157666a0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn Value:0xc0157663b0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn Value:0xc015766518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525307509s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn} value=105.77806427145346 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn} value=105.77806427145346 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.130, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s408, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn Value:0xc015766a40} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn Value:0xc015766c48} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn Value:0xc015766dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525332126s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn} value=65.39188443206898 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn} value=65.39188443206898 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn Value:0xc0157670a0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn Value:0xc0157671f0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn Value:0xc015767378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.52534825s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn} value=101.77631730331785 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn} value=101.77631730331785 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=Atlanta, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=89.187.171.154, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s407, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn Value:0xc015767750} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn Value:0xc0157679a0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn Value:0xc015767bf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525369125s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, 
instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn} value=41.387792565395635 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn} value=41.387792565395635 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn Value:0xc015767f40} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn Value:0xc03b9fe0a8} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn Value:0xc03b9fe218}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525383895s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn} value=91.82357960294796 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, 
ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn} value=91.82357960294796 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Atlanta, cluster=n/a, country=United States, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=45.134.140.193, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=atlanta-s405, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9fe650} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9fe7c8} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9fe4f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525410163s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn} value=45.390995355364055 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn} value=45.390995355364055 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, 
environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9fea88} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9fec10} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9fed80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525433353s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn} value=133.15754628193523 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn} value=133.15754628193523 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Auckland, cluster=Auckland, country=New Zealand, datacenter=ServersAustralia, environment=production, instance=10.0.0.203:9998, ip=221.121.135.57, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=auckland-s401, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, 
datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9ff048} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9ff1f0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9ff3f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525450936s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn} value=30.391287108450115 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn} value=30.391287108450115 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, 
environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9ff7b8} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9ff960} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn Value:0xc03b9ffb50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525478686s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn} value=28.8461482201225 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn} value=28.8461482201225 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Bangkok, cluster=Bangkok, country=Thailand, datacenter=DataPacket, environment=production, instance=10.0.0.203:9998, ip=192.142.226.1, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=bangkok-s401, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn Value:0xc03b9ffe58} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, 
job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn Value:0xc03b9fff88} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn Value:0xc016a2c100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.525494166s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn} value=81.39283672043963 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn} value=81.39283672043963 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn Value:0xc016a2c710} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=barcelona-s408, server_type=1G, service_name=cpz_vpn Value:0xc016a2c470} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Barcelona, cluster=Barcelona, country=Spain, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=146.70.22.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, 
Condensed, the remaining alert instances in this evaluation batch are listed below. Every instance shares the labels `__name__=certificate_days_left`, `access_group=premium`, `brand=CyberGhost`, `environment=production`, `instance=10.0.0.203:9998`, `is_collocated=false`, `job=vpn-infrastructure-monitoring-exporter`, `role=vpn`, and `service_name=cpz_vpn`, and differs only in `server`, `server_type`, `ip`, `path` (a certificate file under `/root/certificates/`), and the location labels (`city`, `cluster`, `country`, `datacenter`). All instances were evaluated at 2024-05-29 13:44:10 UTC with an evaluation duration of approximately 4.53 s, and all resolved to state `Normal`: the query (`A`) and reduce (`B`) expressions both return the certificate's remaining lifetime in days, and the threshold expression (`C`) evaluates to `0` (not firing) in every case.

| Server | Location | Datacenter | Type | IP | Certificate | Days left (A = B) |
| --- | --- | --- | --- | --- | --- | --- |
| barcelona-s408 | Barcelona, Spain | M247 | 1G | 146.70.22.34 | openvpn.crt | 70.67209598011442 |
| barcelona-s409 | Barcelona, Spain | M247 | 1G | 146.70.22.66 | fullchain.pem | 79.39188192774598 |
| barcelona-s409 | Barcelona, Spain | M247 | 1G | 146.70.22.66 | openvpn.crt | 70.67114118744233 |
| barcelona-s405 | Barcelona, Spain | M247 | 1G | 185.253.99.194 | fullchain.pem | 58.389804750763666 |
| barcelona-s405 | Barcelona, Spain | M247 | 1G | 185.253.99.194 | openvpn.crt | 149.11389039942404 |
| barcelona-s402 | Barcelona, Spain | M247 | 1G | 37.120.142.162 | fullchain.pem | 35.38962303965271 |
| barcelona-s402 | Barcelona, Spain | M247 | 1G | 37.120.142.162 | openvpn.crt | 148.12973878101673 |
| barcelona-s403 | Barcelona, Spain | M247 | 1G | 37.120.142.34 | fullchain.pem | 42.3884551317345 |
| barcelona-s403 | Barcelona, Spain | M247 | 1G | 37.120.142.34 | openvpn.crt | 123.77368661374766 |
| barcelona-s404 | Barcelona, Spain | M247 | 1G | 37.120.142.50 | fullchain.pem | 42.39115783619224 |
| barcelona-s404 | Barcelona, Spain | M247 | 1G | 37.120.142.50 | openvpn.crt | 80.80998885514404 |
| barcelona-s407 | Barcelona, Spain | M247 | 1G | 82.102.26.194 | fullchain.pem | 39.39091118382044 |
| barcelona-s407 | Barcelona, Spain | M247 | 1G | 82.102.26.194 | openvpn.crt | 121.77620053619147 |
| barcelona-s406 | Barcelona, Spain | M247 | 1G | 82.102.26.210 | fullchain.pem | 58.389981690790385 |
| barcelona-s406 | Barcelona, Spain | M247 | 1G | 82.102.26.210 | openvpn.crt | 80.81744696899791 |
| belgrade-s403 | Belgrade, Serbia | Altushost | 10G | 37.46.115.41 | fullchain.pem | 36.38812649187633 |
| belgrade-s403 | Belgrade, Serbia | Altushost | 10G | 37.46.115.41 | openvpn.crt | 39.83505936263283 |
| berkshire-s408 | Berkshire, United Kingdom | DataPacket | 10G | 138.199.63.59 | fullchain.pem | 38.3883845246481 |
| berkshire-s408 | Berkshire, United Kingdom | DataPacket | 10G | 138.199.63.59 | openvpn.crt | 105.77487758067588 |
| berkshire-s409 | Berkshire, United Kingdom | DataPacket | 10G | 138.199.63.89 | fullchain.pem | 38.389160970903994 |
| berkshire-s409 | Berkshire, United Kingdom | DataPacket | 10G | 138.199.63.89 | openvpn.crt | 101.7710475455944 |
| berkshire-s410 | Berkshire, United Kingdom | DataPacket | 10G | 84.17.51.1 | fullchain.pem | 81.39176795407967 |
| berkshire-s410 | Berkshire, United Kingdom | DataPacket | 10G | 84.17.51.1 | openvpn.crt | 107.76901332489007 |
| berlin-s402 | Berlin, Germany | CyberGhost DataRoom | 10G | 181.214.173.125 | fullchain.pem | 28.389595676952904 |
| berlin-s402 | Berlin, Germany | CyberGhost DataRoom | 10G | 181.214.173.125 | openvpn.crt | 107.77755864214315 |
country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.125, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s402, server_type=10G, service_name=cpz_vpn} value=107.77755864214315 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.125, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s402, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn Value:0xc02d4c5d70} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn Value:0xc02d4c5b10} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn Value:0xc02d4c5c38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526014055s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn} value=18.390505000268856 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn} value=18.390505000268856 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, 
cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn Value:0xc02d4c5ff0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn Value:0xc01efca700} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn Value:0xc01efcb1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.52602888s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn} value=105.77371102097841 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn} value=105.77371102097841 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.185, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s456, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, 
datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn Value:0xc03285e1b8} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn Value:0xc03285e2e0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn Value:0xc03285e080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526050441s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn} value=18.38781789654022 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn} value=18.38781789654022 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, 
country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn Value:0xc03285e570} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn Value:0xc03285ef88} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn Value:0xc03285f210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526067223s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn} value=3.06655633614228 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn} value=3.06655633614228 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.215, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s457, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn Value:0xc032a9e3e0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, 
datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn Value:0xc032a9e558} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn Value:0xc032a9e698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526080048s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn} value=28.391028233993726 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn} value=28.391028233993726 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn Value:0xc032a9e8e8} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn Value:0xc032a9ea20} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, 
datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn Value:0xc032a9eb50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526094017s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn} value=102.74554212383703 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn} value=102.74554212383703 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.35, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s458, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn Value:0xc032a9eda0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn Value:0xc032a9eec8} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn Value:0xc032a9eff0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526107093s EvaluationString:[ var='A' labels={__name__=certificate_days_left, 
access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn} value=19.38933878174544 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn} value=19.38933878174544 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn Value:0xc032a9f238} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn Value:0xc032a9f360} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn Value:0xc032a9f498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526119042s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn} value=147.7958318400002 ], [ var='B' labels={__name__=certificate_days_left, 
access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn} value=147.7958318400002 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.65, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s459, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn Value:0xc032a9f6f8} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn Value:0xc032a9f818} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn Value:0xc032a9f950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526134159s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn} value=19.39141610037844 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn} value=19.39141610037844 ], [ var='C' labels={__name__=certificate_days_left, 
access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn Value:0xc032a9fe20} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn Value:0xc032a9fbb0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn Value:0xc032a9fcf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526150014s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn} value=98.77400869381053 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn} value=98.77400869381053 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin, country=Germany, datacenter=CyberGhost DataRoom, environment=production, instance=10.0.0.203:9998, ip=181.214.173.95, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s401, server_type=10G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, 
city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn Value:0xc00ab147d0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn Value:0xc00ab159d0} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn Value:0xc00ab15e58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.52616369s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn} value=16.3896662756069 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn} value=16.3896662756069 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, 
instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn Value:0xc016579b40} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn Value:0xc016578f48} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn Value:0xc016579600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.52617667s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn} value=105.7739139613905 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn} value=105.7739139613905 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.34, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s452, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn Value:0xc00b090a88} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, 
path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn Value:0xc00b090ca8} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn Value:0xc00b091688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526185596s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn} value=17.38817966590051 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn} value=17.38817966590051 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn Value:0xc00b0919c0} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn Value:0xc00b091b08} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn Value:0xc00b091c50}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526199179s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn} value=80.81642040713858 ], [ var='B' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn} value=80.81642040713858 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=193.176.86.50, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/openvpn.crt, role=vpn, server=berlin-s453, server_type=1G, service_name=cpz_vpn} value=0 ]} {Instance:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn Value:0xc03ad30950} B:{Var:B Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn Value:0xc03ad30230} C:{Var:C Labels:__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn Value:0xc03ad30560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.526212529s EvaluationString:[ var='A' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn} value=22.38855569241804 ], [ var='B' 
labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=Germany, datacenter=M247, environment=production, instance=10.0.0.203:9998, ip=194.36.108.82, is_collocated=false, job=vpn-infrastructure-monitoring-exporter, path=/root/certificates/fullchain.pem, role=vpn, server=berlin-s451, server_type=1G, service_name=cpz_vpn} value=22.38855569241804 ], [ var='C' labels={__name__=certificate_days_left, access_group=premium, brand=CyberGhost, city=Berlin, cluster=Berlin-2, country=
+ level=debug ts=2024-05-29T13:44:14.901112364Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.901152638Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.901008266Z caller=grafana.go:247 user=389502 slug=ciscoiot msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=3 alerts=0
+ level=debug ts=2024-05-29T13:44:14.900830284Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.90078947Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.900760452Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.900483001Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.900492709Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.900378274Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.900076513Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.899788963Z caller=remote_instance_store.go:51 user=451223 slug=amadeuspfptest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.899703223Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=451223 slug=amadeuspfptest version=25 fingerprint=b132d415e53b86a6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.899434463Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.899034171s EvaluationString:}]" duration=26.021476ms
+ level=debug ts=2024-05-29T13:44:14.899137518Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.898543643Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.898461489Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.898420364Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.898384753Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.89808733Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.898025488Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.897439106Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=432323 slug=lithic version=16 fingerprint=1082ce22eca964f4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.897744902Z level=debug msg="Alert rule evaluated" results="[{Instance:DBClusterIdentifier=prod-journal-processor-cluster State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBClusterIdentifier=prod-journal-processor-cluster Value:0xc0138a77a0} C:{Var:C Labels:DBClusterIdentifier=prod-journal-processor-cluster Value:0xc0138a77a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.897339434s EvaluationString:[ var='B' labels={DBClusterIdentifier=prod-journal-processor-cluster} value=5.741666666666666 ], [ var='C' labels={DBClusterIdentifier=prod-journal-processor-cluster} value=0 ]}]" duration=120.98252ms
+ level=debug ts=2024-05-29T13:44:14.897751888Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.897741651Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.897822636Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.897295356Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.897278879Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.897198652Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:14.89729726Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:14.897287465Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=489921 slug=statuscake version=52 fingerprint=fa988f5b6267fd40 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.897178135Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Last:{Var:Last Labels: Value:0xc053f12620} Value:{Var:Value Labels: Value:0xc053f12638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.896886719s EvaluationString:[ var='Last' labels={} value=0 ], [ var='Value' labels={} value=0 ]}]" duration=5.962257ms
+ level=debug ts=2024-05-29T13:44:14.897089759Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.896950213Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.896585083Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.896572316Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=kube-rbac-proxy-main, deployment=elepay-allvalue-agent, ele_env=pro, instance=10.10.46.3:8443, job=kube-state-metrics, namespace=elepay-api, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0" t=2024-05-29T13:44:14.896544416Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=245291 slug=pismo version=15 fingerprint=4f791e56c346e474 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.896443484Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.896216132s EvaluationString:}]" duration=930.831593ms
+ level=debug ts=2024-05-29T13:44:14.896229473Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.896241038Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=196413 slug=form3production t=2024-05-29T13:44:14.89612109Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=kube-rbac-proxy-main, deployment=smart-boss-web, ele_env=pro, instance=10.10.46.3:8443, job=kube-state-metrics, namespace=elepay, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0" t=2024-05-29T13:44:14.896099521Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.895790077Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.895130236Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-16, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-16, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-16:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.894966822Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-14, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-14, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-14:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.892593014Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-13, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-13, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-13:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.892547044Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-11, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-11, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-11:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.892452921Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-11, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-11, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-11:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.892439058Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-10, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-10, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-10:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.892398279Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-0, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-0, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-0:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.892341851Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=82372 slug=fout instance="metric.label.container=rfp-aggregator1, metric.label.hostname=rfp-aggregator1-0, metric.label.plugin_id=out-bigquery-rfp, metric.label.pod=rfp-aggregator1-0, metric.label.type=bigquery_insert, metric.label.worker_id=0, resource.label.cluster=rfp-dlv, resource.label.instance=rfp-aggregator1-0:prometheus, resource.label.job=rfp-aggregator1, resource.label.location=asia-northeast1, resource.label.namespace=default, resource.label.project_id=rfp-proj, resource.type=prometheus_target" t=2024-05-29T13:44:14.892326458Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager
user=82372 slug=fout t=2024-05-29T13:44:14.892272303Z level=debug msg="State manager processing evaluation results" resultCount=19 + level=debug ts=2024-05-29T13:44:14.895282815Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.891923445Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=marketing-edge, pod=marketing-edge-57758b67bf-8jbqv" t=2024-05-29T13:44:14.891910298Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.895191158Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=marketing-edge, pod=marketing-edge-57758b67bf-6mrr4" t=2024-05-29T13:44:14.891812091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.891735508Z level=debug msg="State manager processing evaluation results" resultCount=2 + level=debug ts=2024-05-29T13:44:14.895172111Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=kube-rbac-proxy-main, deployment=argocd-repo-server, ele_env=pro, instance=10.10.46.3:8443, job=kube-state-metrics, namespace=argocd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0" t=2024-05-29T13:44:14.895015738Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.894852509Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.894754401Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.894559481Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=538037 slug=drivewealth version=7 fingerprint=3b264e07ebe816d6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.894395427Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=_h27TffVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.894058028s EvaluationString:}]" duration=22.844539ms + logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:14.894466287Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:14.894440301Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.894383813Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.893966748Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.893989118Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.893784517Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.8930985Z 
caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.892669823Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.892622397Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.892360201Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.892111568Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=70430 slug=dapperlabs t=2024-05-29T13:44:14.891913095Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.891348447Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.891168806Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.891063519Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.891058445Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.890993507Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.890905867Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=158536 slug=clearsaleantifraude t=2024-05-29T13:44:14.890757276Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.890429436Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=83647 slug=bidsolutions t=2024-05-29T13:44:14.890354207Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=83647 slug=bidsolutions instance="datasource_uid=000000064, ref_id=A" t=2024-05-29T13:44:14.890343831Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=304032 slug=clearbanc instance= t=2024-05-29T13:44:14.889351045Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=304032 slug=clearbanc t=2024-05-29T13:44:14.889189963Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.889135135Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.888867311Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=304032 slug=clearbanc version=51 fingerprint=8f2f99f9b42dc438 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.88876769Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc028e3fce0} B:{Var:B Labels: Value:0xc028e3fce8} C:{Var:C Labels: Value:0xc028e3fcf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.888296027s EvaluationString:[ var='A' labels={} value=0 ], [ var='B' labels={} value=0 ], 
[ var='C' labels={} value=0 ]}]" duration=16.269874ms + level=debug ts=2024-05-29T13:44:14.888712315Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.888640054Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=916149 slug=cmfollpd t=2024-05-29T13:44:14.888624269Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.945578ms + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:14.886221258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:14.88609283Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.888590115Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.886318871Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=109452 slug=deltarisk version=11 fingerprint=c0a458c17f7a9b7c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.885952508Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.885577952s EvaluationString:}]" duration=34.981708ms + level=debug ts=2024-05-29T13:44:14.888485503Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.88822314Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.888256862Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.888093362Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.88742885Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.886710084Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.479026ms + level=debug ts=2024-05-29T13:44:14.886652628Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:14.886588305Z level=debug msg="Saving alert states" count=13 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.886627794Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/countries, Stage=--" t=2024-05-29T13:44:14.886578513Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/countries, Stage=--" t=2024-05-29T13:44:14.88654884Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:14.886522117Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=27737 slug=edfmancapital 
instance="namespace=tech-tps-admin" t=2024-05-29T13:44:14.886539812Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.886450025Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=tech-it-billing" t=2024-05-29T13:44:14.886435702Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=tech-it-billing" t=2024-05-29T13:44:14.886426619Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=tech-capital-markets-shared" t=2024-05-29T13:44:14.886409857Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=tech-capital-markets" t=2024-05-29T13:44:14.886380942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=tech-capital-markets" t=2024-05-29T13:44:14.886371252Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=tech-broker-dealer-portal" t=2024-05-29T13:44:14.886352204Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=tech-broker-dealer-portal" t=2024-05-29T13:44:14.886341976Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="namespace=monitoring" t=2024-05-29T13:44:14.886214725Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.88605414Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885956596Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885919064Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.88575396Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885644102Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885606617Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885485335Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885466406Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885298709Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885315103Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.885342022Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.885325871Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=114492 
slug=railsbank instance="QueueName=BETA-PLAYLIVE-MESSAGE_QUEUE_INBOUND_PAYMENT-DLQ.fifo" t=2024-05-29T13:44:14.885290161Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.885019523Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=142180 slug=luxtronic t=2024-05-29T13:44:14.885042926Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.133205ms + level=info ts=2024-05-29T13:44:14.885065666Z caller=remote_alert_sender.go:94 user=87052 slug=polystream host=polystream-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.81.230:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=rTCPy7l7k alerts=1 + level=debug ts=2024-05-29T13:44:14.884835002Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.884655457Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.884641187Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=181845 slug=novol instance= t=2024-05-29T13:44:14.884303023Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=181845 slug=novol version=92 fingerprint=361cf464c4b61764 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.884168531Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc00798c138} B:{Var:B Labels: Value:0xc00798c110} C:{Var:C Labels: Value:0xc00798c118} D:{Var:D Labels: Value:0xc00798c130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.883875896s EvaluationString:[ var='A' labels={} value=47650 ], [ var='B' labels={} value=47650 ], [ var='C' labels={} value=13.23611111111111 ], [ var='D' labels={} value=0 ]}]" duration=24.715307ms + logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000186, ref_id=A" t=2024-05-29T13:44:14.884233357Z level=warn msg="Failed to take an image" dashboard=eT2TtfMZz panel=4 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:14.884034745Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.883862638Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.883673266Z caller=grafana.go:247 user=786662 slug=skycareaignoc msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=bdjf0u8ejm8lca" groups=0 alerts=0 + level=debug ts=2024-05-29T13:44:14.883529367Z caller=remote_image_capturer.go:54 user=4947 slug=mediamath rule_org_id=1 rule_uid=edbhsq1qbvri8e dashboard=eT2TtfMZz panel=4 msg="rendering alert image with grafana" + logger=ngalert.scheduler user=371756 slug=asapp version=134 fingerprint=26390dcf845b6271 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.883439665Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=prod-champ State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=prod-champ Value:0xc02e7d0a88} C:{Var:C Labels:cluster=prod-champ Value:0xc02e7d0a68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.883045018s EvaluationString:[ var='B' 
labels={cluster=prod-champ} value=0 ], [ var='C' labels={cluster=prod-champ} value=0 ]} {Instance:cluster=prod-dishwireless State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=prod-dishwireless Value:0xc02e7d0ad8} C:{Var:C Labels:cluster=prod-dishwireless Value:0xc02e7d0ae8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.883058296s EvaluationString:[ var='B' labels={cluster=prod-dishwireless} value=0 ], [ var='C' labels={cluster=prod-dishwireless} value=0 ]} {Instance:cluster=prod-fargo State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=prod-fargo Value:0xc02e7d0b28} C:{Var:C Labels:cluster=prod-fargo Value:0xc02e7d0b58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.883067193s EvaluationString:[ var='B' labels={cluster=prod-fargo} value=0 ], [ var='C' labels={cluster=prod-fargo} value=0 ]} {Instance:cluster=prod-oslo State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=prod-oslo Value:0xc02e7d0bb0} C:{Var:C Labels:cluster=prod-oslo Value:0xc02e7d0be0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.88307364s EvaluationString:[ var='B' labels={cluster=prod-oslo} value=0 ], [ var='C' labels={cluster=prod-oslo} value=0 ]} {Instance:cluster=prod-rio State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=prod-rio Value:0xc02e7d0c38} C:{Var:C Labels:cluster=prod-rio Value:0xc02e7d0c20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.883078426s EvaluationString:[ var='B' labels={cluster=prod-rio} value=0 ], [ var='C' labels={cluster=prod-rio} value=0 ]}]" duration=249.231337ms + level=debug ts=2024-05-29T13:44:14.883471312Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000186, ref_id=A" t=2024-05-29T13:44:14.883454694Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000186, ref_id=A" t=2024-05-29T13:44:14.883428827Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.883397989Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:14.883327431Z caller=grafana.go:247 user=786662 slug=skycareaignoc msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=bdjf0u8ejm8lca" groups=0 alerts=0 + level=debug ts=2024-05-29T13:44:14.883259779Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.883025305Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.882959537Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.882892422Z caller=remote_instance_store.go:51 user=618621 slug=sendamatic msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.882851981Z caller=remote_instance_store.go:51 user=231061 slug=teamaround msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=231061 slug=teamaround t=2024-05-29T13:44:14.882651443Z level=debug msg="State manager processing evaluation 
results" resultCount=1 + logger=ngalert.scheduler user=231061 slug=teamaround version=85 fingerprint=57f32602a85c663f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.882560838Z level=debug msg="Alert rule evaluated" results="[{Instance:CacheClusterId=redis-broker-prod-euce1-001 State:Normal Error: Results:map[] Values:map[H:{Var:H Labels:CacheClusterId=redis-broker-prod-euce1-001 Value:0xc02ce5dd38} I:{Var:I Labels:CacheClusterId=redis-broker-prod-euce1-001 Value:0xc02ce5dd30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.882171131s EvaluationString:[ var='H' labels={CacheClusterId=redis-broker-prod-euce1-001} value=0 ], [ var='I' labels={CacheClusterId=redis-broker-prod-euce1-001} value=0 ]}]" duration=140.705944ms + level=debug ts=2024-05-29T13:44:14.882560037Z caller=remote_instance_store.go:51 user=290313 slug=replit msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.88243273Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114286 slug=enverus t=2024-05-29T13:44:14.882128946Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.882041786Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.691534ms + level=debug ts=2024-05-29T13:44:14.881820265Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=174927 slug=syndic82690 t=2024-05-29T13:44:14.881762758Z level=debug msg="Saving alert states" count=12 max_state_save_concurrency=1 + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-220-227.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881731731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-209-59.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881655639Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-209-59.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881645205Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.881532035Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, 
namespace=openshift-monitoring, node=ip-11-0-184-155.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881509634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.881482039Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.88142572Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-167-200.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881466276Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=245291 slug=pismo version=699 fingerprint=66f860a341eb6142 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.881402792Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.881160455s EvaluationString:}]" duration=198.99141ms + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-160-179.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881391399Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-154-80.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881351641Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.88125368Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-142-25.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881281911Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=174927 slug=syndic82690 instance="__name__=kube_node_status_condition, cluster=ocp-ue2-prod, condition=MemoryPressure, container=kube-rbac-proxy-main, endpoint=https-main, job=kube-state-metrics, namespace=openshift-monitoring, node=ip-11-0-135-25.us-east-2.compute.internal, prometheus=openshift-monitoring/k8s, service=kube-state-metrics, status=true" t=2024-05-29T13:44:14.881225326Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.881080391Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.881172283Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.881039794Z caller=remote_instance_store.go:51 user=347171 slug=neuralconcept msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=60330f65369593a4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.880879094Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.880593371s EvaluationString:}]" duration=205.795713ms + level=debug ts=2024-05-29T13:44:14.880879895Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.880805933Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.880809062Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.880567054Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:14.880291278Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.880096168Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.879802467Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60199 slug=wallapop t=2024-05-29T13:44:14.879863608Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.879687951Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.879466893Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.879452676Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.879348051Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112732 slug=gleamer instance="appId=CHBASTIA01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHBASTIA01" t=2024-05-29T13:44:14.879198646Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.87811939Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.878204203Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:14.878108621Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.877991825Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.878012666Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.877504691Z 
caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.877380123Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.877051826Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.877048607Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=129076 slug=marginalunit t=2024-05-29T13:44:14.876929459Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.834534ms + logger=ngalert.state.manager.persist user=642786 slug=sophoscomnsg t=2024-05-29T13:44:14.876903881Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.808564ms + level=debug ts=2024-05-29T13:44:14.87680938Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87679616Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.876440841Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=901230 slug=integromonitor t=2024-05-29T13:44:14.876446735Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.876430695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.876422506Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.876317547Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.876190297Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.876171206Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.876028843Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.875946452Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.876016282Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.875844908Z caller=remote_instance_store.go:51 user=460990 slug=classting msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.875814774Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=460990 slug=classting t=2024-05-29T13:44:14.875792224Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.875895483Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=460990 slug=classting instance= 
t=2024-05-29T13:44:14.875781056Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.875609816Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:14.875438464Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.875576491Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.875569114Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.875559889Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87551018Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87542168Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.875453162Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=530405 slug=zetetic t=2024-05-29T13:44:14.875429425Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.937429ms + level=debug ts=2024-05-29T13:44:14.875309638Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87527577Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87528763Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.875322336Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.875032911Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.874789129Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.87469529Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.176.29:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ce4a42f1-f812-4d3e-b9e7-993a92619377 alerts=1 + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.874463365Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=43.71852ms + level=debug ts=2024-05-29T13:44:14.874494893Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.874000394Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.874001004Z caller=remote_instance_store.go:51 user=402122 slug=leapwallet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=402122 slug=leapwallet instance= t=2024-05-29T13:44:14.873927503Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.873797641Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling 
SaveAlertInstance" + logger=ngalert.scheduler user=402122 slug=leapwallet version=44 fingerprint=b8e314c1331c1bab attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.873787166Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.873439087s EvaluationString:}]" duration=23.065761ms + level=debug ts=2024-05-29T13:44:14.87365636Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87373278Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.873579404Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.873467521Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87341956Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.873296923Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.873339184Z caller=remote_alert_sender.go:94 user=250150 slug=bizagi host=bizagi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.107.6:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a714ff0a-a75d-44fe-ab9b-0dc79fd336bb alerts=1 + level=info ts=2024-05-29T13:44:14.873320018Z caller=remote_alert_sender.go:94 user=250150 slug=bizagi host=bizagi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.117.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a714ff0a-a75d-44fe-ab9b-0dc79fd336bb alerts=1 + level=debug ts=2024-05-29T13:44:14.873338074Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.873134218Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.873260654Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.873228718Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.873056817Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=diff, name=diffSeries(keepLastValue(eadp.gos.torch.prod.nhl-2022-xbsx.Gameplay_Users,6)) Query" t=2024-05-29T13:44:14.873203857Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.873151196Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.873058978Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.873024684Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87300018Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.872945456Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + 
level=debug ts=2024-05-29T13:44:14.872892194Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.872359213Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.872188974Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=304032 slug=clearbanc instance="datasource_uid=vSyGdhP7z, ref_id=A" t=2024-05-29T13:44:14.871614233Z level=debug msg="Setting next state" handler=resultNoData + Error parsing panelUID for alert annotation ruleID=1496 dash= actual= error=strconv.ParseInt: parsing "": invalid syntax + logger=ngalert.scheduler user=304032 slug=clearbanc version=40 fingerprint=5490cee1135f5089 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.871476258Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=vSyGdhP7z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.871099634s EvaluationString:}]" duration=13.560502ms + logger=ngalert.state.manager.persist user=475799 slug=dpdcz t=2024-05-29T13:44:14.871586473Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.852432ms + level=debug ts=2024-05-29T13:44:14.870651942Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.870307049Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.87012448Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.870060997Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.86999052Z caller=remote_image_capturer.go:61 user=87052 slug=polystream rule_org_id=1 rule_uid=rTCPy7l7k dashboard=mYHxzqDWz panel=42 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + level=info ts=2024-05-29T13:44:14.869668452Z caller=remote_alert_sender.go:94 user=250150 slug=bizagi host=bizagi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.117.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=eddz4y0wouolcd alerts=1 + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.869868159Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.869672886Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=5e1656abe00d0d43 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.869584007Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=PSU PS5 Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0525baa00} Threshold:{Var:Threshold Labels: Value:0xc0525baa08} compare:{Var:compare Labels:aggregatedBy=sum, name=PSU PS5 Query Value:0xc0525baa78} sum:{Var:sum Labels:aggregatedBy=sum, name=PSU PS5 Query Value:0xc0525baac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.86936483s EvaluationString:[ var='Breaches' labels={} value=15 ], [ var='Threshold' labels={} value=100 ], [ var='compare'
labels={aggregatedBy=sum, name=PSU PS5 Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=PSU PS5 Query} value=0 ]}]" duration=23.551671ms + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.869576958Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.868008242Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.869563623Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.868006386Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=245291 slug=pismo version=193 fingerprint=ca5714a84e2f5a03 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.86942021Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.869116925s EvaluationString:}]" duration=752.509978ms + level=debug ts=2024-05-29T13:44:14.868447072Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.868274413Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.868145322Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.868039545Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.867996189Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.86780425Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.867708196Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.867674056Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.867619598Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.86653443Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.865823919Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.865964399Z caller=remote_instance_store.go:51 user=142180 slug=luxtronic msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=142180 slug=luxtronic t=2024-05-29T13:44:14.865906414Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=142180 slug=luxtronic instance= t=2024-05-29T13:44:14.865895203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=142180 slug=luxtronic t=2024-05-29T13:44:14.865856539Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.86551326Z level=debug msg="Saving alert states 
done" count=1 max_state_save_concurrency=1 duration=9.220026ms + level=info ts=2024-05-29T13:44:14.865474189Z caller=remote_alert_sender.go:94 user=698103 slug=vericast host=vericast-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.104.126:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adfnr97rwgm4jd alerts=1 + level=debug ts=2024-05-29T13:44:14.865421361Z caller=remote_instance_store.go:51 user=451750 slug=amadeuspfpprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.865173217Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.865203803Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.865018165Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.864265765Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.864099346Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.863505726Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.863322671Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.863029094Z level=debug msg="Saving alert states" count=168 max_state_save_concurrency=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018fc116-7baa-77b7-ad3e-af0608f4d288-ttx1.m-0d8cbb3e, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862980338Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.862891989Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-a0bff09a, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.86280751Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018e9bb8-fae6-7d7b-9264-c59edf13e68f-ttx1.m-24c0282f, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862748303Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.862726339Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.86262206Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-ffca3dcf, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862602156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-ffca3dcf, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862590808Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018d5855-a6b0-78b2-ad59-0a40e8dca528-ttx1.s-1187526a, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862548816Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fdc68fd7, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.86249957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.862416814Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.862366291Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.scheduler user=214309 slug=spenmo t=2024-05-29T13:44:14.862332777Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862294868Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.862245189Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.862205198Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-1bd7657a, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862176961Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0ecf1f57, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862138441Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.862095824Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.862119731Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0e1f0ccd, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862095585Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.862029822Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018c66b2-759e-790d-bd29-a48c06016688-ttx1.m-635b9c96, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.862002728Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.861980089Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-west-2, environment=development, exported_job=srv-flt_018c65f7-d1e6-7743-b406-13ad54643a2e-ttx1.m-c75814fa, instance=10.11.69.37:4646, job=nomad_server, namespace=default, task_group=srv" t=2024-05-29T13:44:14.861960902Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.861848382Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.861845786Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.861737744Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f7c9f-817a-708c-adf9-afdaba9cfc67-ttx1.s-6db8065e, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.861824133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f7c9f-817a-708c-adf9-afdaba9cfc67-ttx1.s-6db8065e, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.861771689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f70de-a32f-7f3a-8ee6-ebc47bcb1257-ttx1.s-a5247cc1, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.861729565Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.861755891Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.861641083Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f5119-9230-7fd7-9022-d8569c272700-ttx1.s-3d52f0a1, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.861563473Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=201644 slug=thoughtspot t=2024-05-29T13:44:14.861497195Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.194519ms + level=debug ts=2024-05-29T13:44:14.86146168Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.861475458Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f5119-9230-7fd7-9022-d8569c272700-ttx1.s-3d52f0a1, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.861510589Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.861419333Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.861472315Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.861414131Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.86137458Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f183a-03b2-7c78-a3df-215f00e3c459-ttx1.s-2ba78a6c, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.861342223Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f183a-03b2-7c78-a3df-215f00e3c459-ttx1.s-2ba78a6c, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.861291478Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018f183a-03b2-7c78-a3df-215f00e3c459-ttx1.s-2ba78a6c, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.861276778Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-ffd7a821, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.86120548Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.861164409Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.861133647Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=642786 slug=sophoscomnsg t=2024-05-29T13:44:14.861013745Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.861115411Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-cbc54739, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.861042521Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.860923403Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.86090826Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e47e1-37e6-7b3d-848f-bd95092c2627-ttx1.s-5cc64e49, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.860860439Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e47e1-37e6-7b3d-848f-bd95092c2627-ttx1.s-5cc64e49, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.860848319Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e47e1-37e6-7b3d-848f-bd95092c2627-ttx1.s-5cc64e49, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.860788106Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.860757361Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.860717688Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e417b-1e06-7df8-8061-c04e81ae2ae5-ttx1.s-af8b90a0, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.8607278Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e417b-1e06-7df8-8061-c04e81ae2ae5-ttx1.s-af8b90a0, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.860683466Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e416f-f8dc-7c6a-8a3c-a54e745f6c18-ttx1.s-6985a413, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.860570408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e416f-f8dc-7c6a-8a3c-a54e745f6c18-ttx1.s-6985a413, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.860508841Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e416f-f8dc-7c6a-8a3c-a54e745f6c18-ttx1.s-6985a413, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.860429768Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.860056816Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-7e6bab31, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.859989079Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.859954442Z caller=remote_instance_store.go:51 user=633381 slug=arascorp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.859936064Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=633381 slug=arascorp t=2024-05-29T13:44:14.859906529Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.859668698Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=633381 slug=arascorp instance= t=2024-05-29T13:44:14.859876327Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.859810469Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=633381 slug=arascorp version=1 fingerprint=3a9303acd2f94519 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.859756881Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc021026c00} B:{Var:B Labels: Value:0xc021026c08} C:{Var:C Labels: Value:0xc021026c10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.859313261s EvaluationString:[ var='A' labels={} value=4.75 ], [ var='B' labels={} value=4.75 ], [ var='C' labels={} value=0 ]}]" duration=21.165984ms + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.859694167Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.859643682Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.859621132Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.85958644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-e01cef20, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.85950841Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-e01cef20, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.859456787Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.859401615Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.859276687Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.859298039Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.859255911Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.859221872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.859160177Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-b1cf4279, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.859106253Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.859045233Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.859022183Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-b1cf4279, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.859048408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018d68da-df3e-738f-8af0-ec1712b093a6-ttx1.m-2a7acfb2, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.858880467Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.858857912Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018d68da-df3e-738f-8af0-ec1712b093a6-ttx1.m-2a7acfb2, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.858832231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018d68da-df3e-738f-8af0-ec1712b093a6-ttx1.m-2a7acfb2, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.858767921Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.858733054Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.858680421Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.858345933Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:14.858631488Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.858398834Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=746601 slug=columbusm3 t=2024-05-29T13:44:14.85819382Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.135797ms + level=debug ts=2024-05-29T13:44:14.858502661Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fa783d80, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.858433834Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-f08c2206, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.858365644Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.858246246Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, 
environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-f08c2206, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.858267757Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-75206fb0, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.858208885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.858115647Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-75206fb0, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.858095459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-694a2865, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.858026368Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-694a2865, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.857975187Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.857910696Z caller=remote_instance_store.go:51 user=347171 slug=neuralconcept msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-4452a1be, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.857874958Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.857710193Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.857755392Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-4452a1be, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.857735661Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-443e1a43, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.857672837Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-405038d5, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.857499674Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-405038d5, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.857398891Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3ab55882, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.857318714Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3ab55882, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.857275199Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.857204312Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.857202457Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.857175778Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-381fbabe, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.857159527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.857129685Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3543c897, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.857006428Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3543c897, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.85688525Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.856850751Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018c7c1b-b5f7-73b1-99c2-d62aee952701-ttx1.m-fcd7f6b0, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1c, prom_instance=10.12.47.65, task_group=srv" t=2024-05-29T13:44:14.856828877Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.856757708Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018c7c1b-b5f7-73b1-99c2-d62aee952701-ttx1.m-fcd7f6b0, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1a, prom_instance=10.12.15.83, task_group=srv" t=2024-05-29T13:44:14.856692586Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cluster=ams-development, datacenter=us-east-1, environment=development, exported_job=srv-flt_018a1c98-744d-7a9b-9f50-5d9d6f8fdaa2-ttx1.s-8d0b359d, instance=10.12.68.154:4646, job=nomad_server, namespace=default, prom_az=us-east-1b, prom_instance=10.12.28.255, task_group=srv" t=2024-05-29T13:44:14.856592525Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.856552557Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018fc116-7baa-77b7-ad3e-af0608f4d288-ttx1.m-0d8cbb3e, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.856455232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018f183a-03b2-7c78-a3df-215f00e3c459-ttx1.s-45c8ac25, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.856385925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018f183a-03b2-7c78-a3df-215f00e3c459-ttx1.s-45c8ac25, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) 
(Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.856365705Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.856325321Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-ecdeac04, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.856266844Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.856177154Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.856095816Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.856124566Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.856070209Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.855841318Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.855909519Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018da0be-b8f3-7a1d-8a26-1e03517fb2ca-ttx1.s-dc31defe, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855839573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-ffca3dcf, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855737791Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.855656868Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d5855-a6b0-78b2-ad59-0a40e8dca528-ttx1.s-1187526a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, 
host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.85566246Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.855576114Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-bbc2a11a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855398357Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.855321169Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-98425cc8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855321562Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.855238494Z caller=remote_instance_store.go:51 user=167630 slug=gr168 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte 
instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-98425cc8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855308535Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.85526199Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, 
service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855243346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=167630 slug=gr168 instance= t=2024-05-29T13:44:14.855170126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=167630 slug=gr168 t=2024-05-29T13:44:14.855136529Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-39940df7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855168474Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-1bd7657a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.855094007Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.855054048Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.855068241Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0e1f0ccd, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.854965958Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c66b2-759e-790d-bd29-a48c06016688-ttx1.m-635b9c96, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.854844399Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c66b2-759e-790d-bd29-a48c06016688-ttx1.m-635b9c96, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) 
(Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.85483076Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c65f7-d1e6-7743-b406-13ad54643a2e-ttx1.m-c75814fa, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.85477799Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.854696181Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="ClusterName=Alfa3Cluster, ServiceName=Alfa3WorkerProd" t=2024-05-29T13:44:14.85469995Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018f7c9f-817a-708c-adf9-afdaba9cfc67-ttx1.s-6db8065e, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.854616212Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.854581064Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.854482914Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112387 slug=lucidhq instance="datasource_uid=grafanacloud-prom, ref_id=A,B,C,D" t=2024-05-29T13:44:14.854372857Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.854297324Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.854211564Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.854202094Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-cbc54739, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.854226273Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-cbc54739, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.854207097Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=739013 slug=altoglobalsharing t=2024-05-29T13:44:14.854189333Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=14.520496ms + level=debug ts=2024-05-29T13:44:14.854055832Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.854049879Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.85405559Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e47e1-37e6-7b3d-848f-bd95092c2627-ttx1.s-5cc64e49, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.854112879Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.85407769Z caller=remote_alert_sender.go:94 user=87780 slug=zencloudandhosting host=zencloudandhosting-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.192.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f47012cd-24d0-415e-9591-8ee88c19f904 alerts=1 + level=debug ts=2024-05-29T13:44:14.8540338Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e417b-1e06-7df8-8061-c04e81ae2ae5-ttx1.s-af8b90a0, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.854032236Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.85373183Z level=debug msg="Saving 
alert states done" count=1 max_state_save_concurrency=1 duration=59.601735ms + level=debug ts=2024-05-29T13:44:14.853918893Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.853912109Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.853820754Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.853719432Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.85218963Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.262422ms + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.853602142Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.853458803Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.853456119Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.853332179Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.853265359Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.853247249Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, 
cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.85320735Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.853017565Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.853029704Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.853081862Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.852962226Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d68da-df3e-738f-8af0-ec1712b093a6-ttx1.m-2a7acfb2, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.853046109Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.852992507Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d4348-5186-7ed1-b002-09e747ac421a-ttx1.s-8851f5a3, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852958492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d4348-5186-7ed1-b002-09e747ac421a-ttx1.s-8851f5a3, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852939077Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="FunctionName=outbound-processor-replayer-live" t=2024-05-29T13:44:14.85291576Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:14.852894432Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.852888696Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.85290001Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:14.852842722Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.85279202Z caller=remote_instance_store.go:51 user=618621 slug=sendamatic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fa783d80, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852844434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=527202 slug=lnrsusinsurancedev version=36 fingerprint=67de55a02ee4e516 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.852779721Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=Rz3MTGbVz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.85246135s EvaluationString:}]" duration=34.621929ms + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.852806866Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.852798271Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-f08c2206, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852745651Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-75206fb0, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852676587Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.852594616Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.852530421Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.852449081Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, 
ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-694a2865, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852532018Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.852493114Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.852397031Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-443e1a43, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852343912Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.85218843Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=ams-development" + level=debug ts=2024-05-29T13:44:14.852182026Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3ab55882, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.852153112Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=465668 slug=xpressinfra version=47 fingerprint=9072ca4be7dcf039 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.852101293Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.85180921s EvaluationString:}]" duration=2.328205218s + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3543c897, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv" t=2024-05-29T13:44:14.85193805Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=129076 slug=marginalunit instance= t=2024-05-29T13:44:14.85207436Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.852068755Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=129076 slug=marginalunit version=1 fingerprint=17ca6ed21f5f58e1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.851918119Z 
level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.851556988s EvaluationString:}]" duration=40.432188ms + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.851682477Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=ams-development" + logger=ngalert.scheduler user=426229 slug=accelbyte version=29 fingerprint=735a80fc901086bd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.845864426Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018a1c98-744d-7a9b-9f50-5d9d6f8fdaa2-ttx1.s-8d0b359d, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018a1c98-744d-7a9b-9f50-5d9d6f8fdaa2-ttx1.s-8d0b359d, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc06d583198} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018a1c98-744d-7a9b-9f50-5d9d6f8fdaa2-ttx1.s-8d0b359d, 
host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc06d583880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831280448s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018a1c98-744d-7a9b-9f50-5d9d6f8fdaa2-ttx1.s-8d0b359d, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018a1c98-744d-7a9b-9f50-5d9d6f8fdaa2-ttx1.s-8d0b359d, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c7c1b-b5f7-73b1-99c2-d62aee952701-ttx1.m-fcd7f6b0, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, 
host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c7c1b-b5f7-73b1-99c2-d62aee952701-ttx1.m-fcd7f6b0, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0236d8730} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c7c1b-b5f7-73b1-99c2-d62aee952701-ttx1.m-fcd7f6b0, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0236d8fe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831311868s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c7c1b-b5f7-73b1-99c2-d62aee952701-ttx1.m-fcd7f6b0, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, 
host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=1 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018c7c1b-b5f7-73b1-99c2-d62aee952701-ttx1.m-fcd7f6b0, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3543c897, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3543c897, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, 
[... alert-evaluation dump condensed for readability: this span repeats one near-identical record per Nomad allocation. Every record is for __name__=nomad_nomad_job_summary_failed with State:Normal, Error: empty, Results:map[], and vars B and C both value=0. All records share EvaluatedAt:2024-05-29 13:44:10 +0000 UTC, with EvaluationDuration ranging from 4.831330179s to 4.831504816s.

The records share this label set and differ only in exported_job and the internal Value:0xc… pointers:

cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv

exported_job values covered in this span: srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-3543c897, …m-381fbabe, …m-3ab55882, …m-405038d5, …m-443e1a43, …m-4452a1be, …m-694a2865, …m-75206fb0, …m-f08c2206, …m-fa783d80, then srv-flt_018d4348-5186-7ed1-b002-09e747ac421a-ttx1.s-8851f5a3 and srv-flt_018d68da-df3e-738f-8af0-ec1712b093a6-ttx1.m-2a7acfb2. The span ends with the next record cut off mid-label set at cluster=ams-development, and the dump continues below. ...]
ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-b1cf4279, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-b1cf4279, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc02ea440e8} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-b1cf4279, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc02ea445a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831521134s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, 
ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-b1cf4279, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-b1cf4279, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, 
ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc02ea450f8} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc02ea45680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831534784s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=1 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, 
ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-bb997441, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-e01cef20, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-e01cef20, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0097e4030} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, 
exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-e01cef20, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0097e4380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831549328s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-e01cef20, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ded5f-6952-7e32-b71a-125ac596536f-ttx1.s-e01cef20, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, host_arch=arm64, 
host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0097e4b80} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0097e4ed8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831563229s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, host_arch=arm64, host_cpu_vendor_id=ARM, 
host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1c24-b8cc-7825-bf23-0e5eb097b905-ttx1.s-22c4e796, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1cd4-bb4d-7e5d-b38a-1840aca9447b-ttx1.s-7492e59a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1cd4-bb4d-7e5d-b38a-1840aca9447b-ttx1.s-7492e59a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, 
instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0097e5908} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1cd4-bb4d-7e5d-b38a-1840aca9447b-ttx1.s-7492e59a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0097e55e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831576662s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1cd4-bb4d-7e5d-b38a-1840aca9447b-ttx1.s-7492e59a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e1cd4-bb4d-7e5d-b38a-1840aca9447b-ttx1.s-7492e59a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, 
net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-7e6bab31, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-7e6bab31, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc070196f30} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-7e6bab31, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 
2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0701961a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831590421s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-7e6bab31, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=1 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-7e6bab31, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-a3472fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, 
service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-a3472fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0262b6000} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-a3472fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0262b65a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831602977s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-a3472fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} 
value=1 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-a3472fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-c6f5cab2, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-c6f5cab2, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0262b71e8} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, 
cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-c6f5cab2, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc0262b7608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831617204s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-c6f5cab2, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e3581-1fb0-7e1e-bad4-d2890640f059-ttx1.s-c6f5cab2, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, 
ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e416f-f8dc-7c6a-8a3c-a54e745f6c18-ttx1.s-6985a413, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e416f-f8dc-7c6a-8a3c-a54e745f6c18-ttx1.s-6985a413, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc04a678380} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018e416f-f8dc-7c6a-8a3c-a54e745f6c18-ttx1.s-6985a413, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17, host_name=ip-10-12-68-154.ec2.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-12-68-154 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc04a678900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831629024s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-east-1b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-east-1, cluster=ams-development, 
[Grafana alert evaluation log for __name__=nomad_nomad_job_summary_failed, condensed: the original output repeats the identical label set for every Nomad job instance, and again for each of the expression variables B and C. Only the deduplicated, recoverable information is kept below.]

Common labels on every instance: cloud_account_id=166244137514, cloud_platform=aws_ec2, cloud_provider=aws, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, host_arch=arm64, host_cpu_vendor_id=ARM, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv.

All instances: State:Normal, Error: (none), Results:map[], EvaluatedAt:2024-05-29 13:44:10 +0000 UTC, EvaluationDuration ~4.83s.

Instances scraped on host ip-10-12-68-154.ec2.internal (us-east-1b, host_id=i-01c6d3417d2764d8f, host_image_id=ami-0f59029f386ab4f17):
- exported_job=srv-flt_018e416f-f8dc-7c6a-8a3c-a54e745f6c18-ttx1.s-6985a413: B=0, C=0
- exported_job=srv-flt_018e417b-1e06-7df8-8061-c04e81ae2ae5-ttx1.s-af8b90a0: B=0, C=0
- exported_job=srv-flt_018e47e1-37e6-7b3d-848f-bd95092c2627-ttx1.s-5cc64e49: B=0, C=0
- exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-cbc54739: B=0, C=0
- exported_job=srv-flt_018ef461-e3f3-7a3b-9fd8-e3b2633e0657-ttx1.s-ffd7a821: B=0, C=0
- exported_job=srv-flt_018f183a-03b2-7c78-a3df-215f00e3c459-ttx1.s-2ba78a6c: B=1, C=0
- exported_job=srv-flt_018f5119-9230-7fd7-9022-d8569c272700-ttx1.s-3d52f0a1: B=1, C=0
- exported_job=srv-flt_018f70de-a32f-7f3a-8ee6-ebc47bcb1257-ttx1.s-a5247cc1: B=0, C=0
- exported_job=srv-flt_018f7c9f-817a-708c-adf9-afdaba9cfc67-ttx1.s-6db8065e: B=0, C=0

Instances scraped on host ip-10-11-69-37.us-west-2.compute.internal (us-west-2b, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c):
- exported_job=srv-flt_018a1c98-744d-7a9b-9f50-5d9d6f8fdaa2-ttx1.s-b27a1be6: B=0, C=0
- exported_job=srv-flt_018c65f7-d1e6-7743-b406-13ad54643a2e-ttx1.m-c75814fa: B=0, C=0
- exported_job=srv-flt_018c66b2-759e-790d-bd29-a48c06016688-ttx1.m-635b9c96: B=0, C=0
- exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-08fd96e3: B=0, C=0
- exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0e1f0ccd: State:Normal, values truncated in the original

[log continues with further instances in the same format]
http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc00f8aab50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831822572s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0e1f0ccd, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0e1f0ccd, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0ecf1f57, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, 
job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0ecf1f57, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc00f8ab198} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0ecf1f57, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc00f8ab498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831834599s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0ecf1f57, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, 
job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-0ecf1f57, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-1bd7657a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-1bd7657a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux 
ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc00f8abbb0} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-1bd7657a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc00f8abea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831847638s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-1bd7657a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-1bd7657a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws 
#16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-39940df7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-39940df7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc042b04538} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-39940df7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, 
service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc042b048e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831859779s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-39940df7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-39940df7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, 
service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc042b04f90} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc042b05278}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831871697s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, 
service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-47f0c945, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-98425cc8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-98425cc8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc042b05860} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, 
cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-98425cc8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc042b05b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831884258s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-98425cc8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-98425cc8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, 
cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-bbc2a11a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-bbc2a11a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc05d6bf110} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-bbc2a11a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc05d6be290}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831897045s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, 
cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-bbc2a11a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-bbc2a11a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-d5637fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, 
cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-d5637fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc02fbea550} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-d5637fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc02fbeac98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831910529s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-d5637fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, 
ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-d5637fa8, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fdc68fd7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fdc68fd7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc02fbebac0} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, 
ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fdc68fd7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc01ba98190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831925032s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fdc68fd7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018ce9d9-68f0-7b26-939d-2aa06fcd6694-ttx1.m-fdc68fd7, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, 
ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d5855-a6b0-78b2-ad59-0a40e8dca528-ttx1.s-1187526a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d5855-a6b0-78b2-ad59-0a40e8dca528-ttx1.s-1187526a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc01ba99060} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d5855-a6b0-78b2-ad59-0a40e8dca528-ttx1.s-1187526a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc01ba995f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.83193721s EvaluationString:[ var='B' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, 
ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d5855-a6b0-78b2-ad59-0a40e8dca528-ttx1.s-1187526a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ], [ var='C' labels={__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d5855-a6b0-78b2-ad59-0a40e8dca528-ttx1.s-1187526a, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv} value=0 ]} {Instance:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-ffca3dcf, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=166244137514, cloud_availability_zone=us-west-2b, cloud_platform=aws_ec2, cloud_provider=aws, cloud_region=us-west-2, cluster=ams-development, ec2_tag_Name=accelbyte-armada-development-nomad-server-001-asg, ec2_tag_cluster=ams-development, ec2_tag_environment=development, ec2_tag_environment_name=development, ec2_tag_project=armada, ec2_tag_service=nomad-server, ec2_tag_unique_id=88002, 
environment_name=development, exported_job=srv-flt_018d7c9b-06e2-751f-b87b-9aa91ddab744-ttx1.s-ffca3dcf, host_arch=arm64, host_cpu_vendor_id=ARM, host_id=i-05f21b434b13e6923, host_image_id=ami-00a396b9428df5a7c, host_name=ip-10-11-69-37.us-west-2.compute.internal, host_type=m6gd.medium, http_scheme=https, instance=127.0.0.1:4646, job=nomad_metrics, namespace=default, net_host_port=4646, os_description=Ubuntu 22.04.4 LTS (Jammy Jellyfish) (Linux ip-10-11-69-37 6.5.0-1016-aws #16~22.04.1-Ubuntu SMP Wed Mar 13 20:57:51 UTC 2024 aarch64), os_type=linux, service=nomad-server, service_instance_id=127.0.0.1:4646, service_name=nomad_metrics, task_group=srv Value:0xc025dec5b0} C:{Var:C Labels:__name__=nomad_nomad_job_summary_failed, cloud_account_id=16624413751 + level=debug ts=2024-05-29T13:44:14.851533386Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.851387979Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.851182134Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:14.851096007Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396586 slug=opengov instance="dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration" t=2024-05-29T13:44:14.851086722Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.85100978Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:14.850871581Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.850871473Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.850909503Z caller=remote_instance_store.go:51 user=698103 slug=vericast msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=396586 slug=opengov version=6 fingerprint=7642c9cd633c0d85 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.850795161Z level=debug msg="Alert rule evaluated" results="[{Instance:dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration Value:0xc07a7bf518} B:{Var:B Labels:dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration Value:0xc07a7bf560} C:{Var:C Labels:dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration Value:0xc07a7bf568}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.850486282s EvaluationString:[ var='A' labels={dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration} value=0 ], [ var='B' labels={dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration} value=0 ], [ var='C' labels={dimension_LoadBalancer=net/k8s-ingressn-prointer-38a8d6a93a/d84d3befaab2249c, tag_environment=integration} value=0 ]}]" duration=29.287057ms + logger=ngalert.state.manager 
user=245291 slug=pismo instance= t=2024-05-29T13:44:14.850610328Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.850576703Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=ci, pod=ci-bc9d97c96-mb5fz" t=2024-05-29T13:44:14.85052337Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=ci, pod=ci-bc9d97c96-6nbqj" t=2024-05-29T13:44:14.85049825Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=698963 slug=lemonade version=3 fingerprint=2bccf8eaca22a191 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.850345964Z level=debug msg="Alert rule evaluated" results="[{Instance:app=ci, pod=ci-bc9d97c96-6nbqj State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=ci, pod=ci-bc9d97c96-6nbqj Value:0xc022395370} THRESHOLD:{Var:THRESHOLD Labels:app=ci, pod=ci-bc9d97c96-6nbqj Value:0xc0223953a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.850007709s EvaluationString:[ var='QUERY' labels={app=ci, pod=ci-bc9d97c96-6nbqj} value=0 ], [ var='THRESHOLD' labels={app=ci, pod=ci-bc9d97c96-6nbqj} value=0 ]} {Instance:app=ci, pod=ci-bc9d97c96-mb5fz State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=ci, pod=ci-bc9d97c96-mb5fz Value:0xc0223953d8} THRESHOLD:{Var:THRESHOLD Labels:app=ci, pod=ci-bc9d97c96-mb5fz Value:0xc022395408}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.850018944s EvaluationString:[ var='QUERY' labels={app=ci, pod=ci-bc9d97c96-mb5fz} value=0 ], [ var='THRESHOLD' labels={app=ci, pod=ci-bc9d97c96-mb5fz} value=0 ]}]" duration=39.975378ms + level=debug ts=2024-05-29T13:44:14.850345606Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849633388Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849733621Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849776831Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849734593Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849675182Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.84967653Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849649188Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849649644Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849639639Z caller=remote_instance_store.go:51 user=295631 slug=dapvizor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.849548936Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=163513 slug=dialpad instance="job_name=retentionpipeline, 
project=voiceai-staging, project_id=voiceai-staging" t=2024-05-29T13:44:14.849374259Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=295631 slug=dapvizor t=2024-05-29T13:44:14.849433634Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.849403936Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=163513 slug=dialpad instance="job_name=pubsubtopinotpipeline-1716707307, project=voiceai-staging-olap, project_id=voiceai-staging-olap" t=2024-05-29T13:44:14.849276208Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=698103 slug=vericast version=24 fingerprint=ed42b1781155d8bd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.849174389Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels: Value:0xc0210265a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.848793524s EvaluationString:[ var='B0' metric='age_minutes' labels={} value=36763 ]}]" duration=104.859966ms + logger=ngalert.state.manager user=163513 slug=dialpad instance="job_name=pubsubtoespipeline, project=voiceai-staging, project_id=voiceai-staging" t=2024-05-29T13:44:14.849063333Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=174054 slug=netrading t=2024-05-29T13:44:14.849097068Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=75.594998ms + level=debug ts=2024-05-29T13:44:14.848913446Z caller=remote_instance_store.go:51 user=452115 slug=ybmetrics msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.848521764Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.848459067Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.848273243Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.840405ms + logger=ngalert.state.manager.persist user=357638 slug=usepower t=2024-05-29T13:44:14.848243411Z level=debug msg="Saving alert states" count=7 max_state_save_concurrency=1 + logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-user-stg" t=2024-05-29T13:44:14.848231017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-payment-method-stg" t=2024-05-29T13:44:14.848114608Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=844490 slug=prea t=2024-05-29T13:44:14.848066981Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=844490 slug=prea instance="resource.label.project_id=prea-cloud, resource.label.service_name=respace-map-tile-cloud-run, resource.type=cloud_run_revision" t=2024-05-29T13:44:14.848051026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-business-decision-stg" t=2024-05-29T13:44:14.847866302Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-brand-stg" 
t=2024-05-29T13:44:14.847775683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-application-decision-stg" t=2024-05-29T13:44:14.847677685Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.847588968Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.847371458Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.847186644Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.846924719Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.847024725Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.846923294Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.846709963Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.84525077Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.846647827Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.846164416Z caller=remote_alert_sender.go:94 user=767797 slug=mgmresorts host=mgmresorts-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.132.178:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddmilhiz8zj7kc alerts=1 + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:14.845998156Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.845389238Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.186759ms + level=debug ts=2024-05-29T13:44:14.845466305Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.845405404Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.845332174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.845293453Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=c1cc952e027b39d3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.845255008Z level=error msg="Failed to evaluate rule" error="failed to build query 'C': data source not found" duration=13.913859ms + level=error ts=2024-05-29T13:44:14.845219797Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'C': data source not found" + level=debug ts=2024-05-29T13:44:14.845096732Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.845168753Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=543604 slug=kingmakers t=2024-05-29T13:44:14.845155742Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=24.910782ms + level=debug ts=2024-05-29T13:44:14.84508673Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.845038712Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.84511235Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.845067243Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.844964633Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.844353909Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.844069321Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.843929647Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=746601 slug=columbusm3 t=2024-05-29T13:44:14.843961461Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=312340 slug=lakefs version=32 fingerprint=3eb9c38ad52b9326 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.843711361Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc0485afa60} C:{Var:C Labels: Value:0xc0485afa68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.843336697s EvaluationString:[ var='B' labels={} value=117.65000000000002 ], [ var='C' labels={} value=0 ]}]" duration=307.14311ms + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.84230912Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="region=us-east-1, service=kube-state-metrics, stage=sandbox" + logger=ngalert.state.manager user=530405 slug=zetetic instance="chain=Kusama, pool=Watermelon 1" t=2024-05-29T13:44:14.84247748Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=530405 slug=zetetic version=57 fingerprint=69539f232479edf9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.842355058Z level=debug msg="Alert rule evaluated" results="[{Instance:chain=Kusama, pool=Watermelon 1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:chain=Kusama, pool=Watermelon 1 Value:0xc043e97ac0} C:{Var:C Labels:chain=Kusama, pool=Watermelon 1 Value:0xc043e97b00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.841946653s EvaluationString:[ var='B' labels={chain=Kusama, pool=Watermelon 1} value=1 ], [ var='C' labels={chain=Kusama, pool=Watermelon 1} value=0 ]}]" duration=6.439398ms + level=debug ts=2024-05-29T13:44:14.842217284Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.842131332Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.842104388Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.842059728Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.841994395Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=844031 slug=dzomysql t=2024-05-29T13:44:14.841977472Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.822656ms + level=debug ts=2024-05-29T13:44:14.841676601Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.841722469Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.84164311Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.841551624Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.841032865Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=691855 slug=chainlake t=2024-05-29T13:44:14.840283373Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.584129ms + level=debug ts=2024-05-29T13:44:14.840185343Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=618621 slug=sendamatic instance="config_version=1705914097836890880, instance=https://blogtrottr.com, job=Blogtrottr SSL, probe=London" t=2024-05-29T13:44:14.839628957Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.839706658Z caller=remote_instance_store.go:51 user=739013 slug=altoglobalsharing msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=739013 slug=altoglobalsharing t=2024-05-29T13:44:14.839663767Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager 
user=739013 slug=altoglobalsharing instance="instance=172.17.0.1:9100, job=node_venco_paq" t=2024-05-29T13:44:14.839638887Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.839548043Z caller=grafana.go:247 user=391538 slug=risknarrative msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=noData&state=error" groups=3 alerts=0 + logger=ngalert.state.manager user=618621 slug=sendamatic instance="config_version=1688548870750945536, instance=https://www.sendamatic.net, job=Sendamatic web, probe=Amsterdam" t=2024-05-29T13:44:14.839461424Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=618621 slug=sendamatic instance="config_version=1683110971424005632, instance=in.smtp.sendamatic.net:465, job=in.smtp.sendamatic.net TLS, probe=Amsterdam" t=2024-05-29T13:44:14.839393343Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=618621 slug=sendamatic t=2024-05-29T13:44:14.839340352Z level=debug msg="State manager processing evaluation results" resultCount=9 + level=debug ts=2024-05-29T13:44:14.839329468Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.838459194Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.839055062Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.838815592Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.838808504Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.83822855Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.838622529Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.838181219Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-data-management-db, env=eu" t=2024-05-29T13:44:14.838510049Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-data-management-db, env=eu" t=2024-05-29T13:44:14.838426616Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.838431749Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.838427668Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=MILAN Query" t=2024-05-29T13:44:14.838395237Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.838354476Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.838233715Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.83801521Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.837853162Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.837671655Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.837571084Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.837621105Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.837595267Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.837597341Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.837542984Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.837499061Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=11291d2ac8aa5dae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.837489921Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.837251762s EvaluationString:}]" duration=213.704693ms + level=debug ts=2024-05-29T13:44:14.83739034Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.837423526Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=855233 slug=sadeno version=18 fingerprint=e1e4e33f9e098979 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.836873424Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=localhost:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=localhost:9100 Value:0xc001e72270} C:{Var:C Labels:instance=localhost:9100 Value:0xc001e72290}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.836452542s EvaluationString:[ var='A' labels={instance=localhost:9100} value=28.095197364197375 ], [ var='C' labels={instance=localhost:9100} value=0 ]}]" duration=12.437924ms + level=debug ts=2024-05-29T13:44:14.837041385Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.836720158Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.836625189Z caller=remote_alert_sender.go:94 user=633335 slug=promqlworkshop host=promqlworkshop-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.46.240:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fe56779f-247a-42b2-849f-80065701e6a7 alerts=1 + level=debug ts=2024-05-29T13:44:14.836543624Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.836522037Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.836367057Z 
caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=806229 slug=simplisafe version=21 fingerprint=0ee6e15b57ef34a1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.836146449Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.835641043s EvaluationString:}]" duration=34.848391ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-comply-advantage-db, env=eu" t=2024-05-29T13:44:14.835947708Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.835855844Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.835726047Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.835626442Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.835662082Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.835634737Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-compliance-lens-db, env=eu" t=2024-05-29T13:44:14.835599328Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.83553552Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.835475451Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.835155886Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:14.835107893Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="byoc=True, data_cluster_uuid=b131038e-93a4-4e68-b485-767341aea00c, endpoint=https-metrics, instance=10.63.76.31:10250, metrics_path=/metrics, namespace=rp-60-261-1135, node=gke-measurement-redp-redpanda-worker--3d3a69ed-g9rz, persistentvolumeclaim=datadir-rp-13f447c-1, provider=google, region=us-central1, service=prometheus-kube-prometheus-kubelet, vectorized_cloud_data_cluster_name=measurement-redpanda-prod-us" t=2024-05-29T13:44:14.835082483Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=130566 slug=commercebuild instance= t=2024-05-29T13:44:14.834955871Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=848777 slug=opsalert t=2024-05-29T13:44:14.834432651Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=20.736419ms + level=debug ts=2024-05-29T13:44:14.834229389Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.83391843Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.833798112Z caller=remote_instance_store.go:51 user=504140 
slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.830790998Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.833375585Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.833319653Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.833275711Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.833233932Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.833174915Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316960 slug=mojamteam instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.83305652Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=391538 slug=risknarrative t=2024-05-29T13:44:14.832975367Z level=debug msg="State manager processing evaluation results" resultCount=143 + level=debug ts=2024-05-29T13:44:14.833055021Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.832993686Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.832672814Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.832653003Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.832554388Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.831671106Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.832044292Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.832183778Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.83224775Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.831901708Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:14.832029797Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=synthetic-monitoring-agent, service=synthetic-monitoring-linkerd" t=2024-05-29T13:44:14.832040399Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler 
user=767797 slug=mgmresorts version=51 fingerprint=35e3d887a7a125f3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.831895345Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.831570377s EvaluationString:}]" duration=29.517244ms + logger=ngalert.state.manager.persist user=337951 slug=pawapay t=2024-05-29T13:44:14.831949717Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:14.831957955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:14.831740642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=307381 slug=kambitaskforce version=11 fingerprint=a9ff68b14f1a1385 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.831644548Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.750962854s EvaluationString:}]" duration=100.347573ms + logger=ngalert.state.manager user=337951 slug=pawapay t=2024-05-29T13:44:14.831601096Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.831539193Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=337951 slug=pawapay version=8 fingerprint=fbf9c3cd4d8c9664 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.831331653Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.76711863s EvaluationString:}]" duration=99.376068ms + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=suncorp, service=suncorp-appgateway-api" t=2024-05-29T13:44:14.831475694Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.831310053Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.831251618Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.831198232Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:14.831109527Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.557281ms + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=suncorp, service=payconnector-background" t=2024-05-29T13:44:14.831081506Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=114492 slug=railsbank version=2 fingerprint=1c7d9220ea5c1b2c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.830883769Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=db-endusers-120230105143002016600000005 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=db-endusers-120230105143002016600000005 Value:0xc017189510} C:{Var:C 
Labels:DBInstanceIdentifier=db-endusers-120230105143002016600000005 Value:0xc017189518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.830576074s EvaluationString:[ var='B' labels={DBInstanceIdentifier=db-endusers-120230105143002016600000005} value=100 ], [ var='C' labels={DBInstanceIdentifier=db-endusers-120230105143002016600000005} value=0 ]}]" duration=199.589111ms + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.830736335Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=000000002, ref_id=B" t=2024-05-29T13:44:14.830704118Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=suncorp, service=payconnector" t=2024-05-29T13:44:14.830730241Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=suncorp, service=metadata" t=2024-05-29T13:44:14.830560144Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=suncorp, service=metadata" t=2024-05-29T13:44:14.830545316Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=dec48d114d5f8082 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.83004483Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000002, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.829643025s EvaluationString:}]" duration=47.860722ms + logger=ngalert.state.manager.persist user=528849 slug=bitvavo t=2024-05-29T13:44:14.829944673Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.311234ms + level=debug ts=2024-05-29T13:44:14.829867623Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829794218Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829783221Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829686278Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829654608Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829618183Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829596514Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829531961Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=suncorp, service=core-srv-email" t=2024-05-29T13:44:14.829587335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=suncorp, service=core-srv-email" t=2024-05-29T13:44:14.829578487Z level=debug msg="Setting 
next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.829431114Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829438072Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.829300544Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=smartrep, service=payconnector" t=2024-05-29T13:44:14.828990873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=smartrep, service=metadata" t=2024-05-29T13:44:14.828830915Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.828355067Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.828306776Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.828274003Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.82829418Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.824498192Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=smartrep, service=driver" t=2024-05-29T13:44:14.82779369Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=691855 slug=chainlake t=2024-05-29T13:44:14.827690822Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=smartrep, service=core-api-reports" t=2024-05-29T13:44:14.827556103Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.827532538Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.827497089Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.827483349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.827471626Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.827439733Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=691855 slug=chainlake version=2 fingerprint=193454850dafaa9f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.827339488Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=nomad_client_uptime, datacenter=fsn1-dc14, instance=redpanda-production-cpx41-redpanda-production, node_class=none, node_id=2df69ecf-bd2b-8921-41c9-a1eb5de12057, node_pool=default, node_scheduling_eligibility=eligible, node_status=ready State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=nomad_client_uptime, datacenter=fsn1-dc14, 
instance=redpanda-production-cpx41-redpanda-production, node_class=none, node_id=2df69ecf-bd2b-8921-41c9-a1eb5de12057, node_pool=default, node_scheduling_eligibility=eligible, node_status=ready Value:0xc046fbf150} B:{Var:B Labels:__name__=nomad_client_uptime, datacenter=fsn1-dc14, instance=redpanda-production-cpx41-redpanda-production, node_class=none, node_id=2df69ecf-bd2b-8921-41c9-a1eb5de12057, node_pool=default, node_scheduling_eligibility=eligible, node_status=ready Value:0xc046fbf200} C:{Var:C Labels:__name__=nomad_client_uptime, datacenter=fsn1-dc14, instance=redpanda-production-cpx41-redpanda-production, node_class=none, node_id=2df69ecf-bd2b-8921-41c9-a1eb5de12057, node_pool=default, node_scheduling_eligibility=eligible, node_status=ready Value:0xc046fbf2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.826880184s EvaluationString:[ var='A' labels={__name__=nomad_client_uptime, datacenter=fsn1-dc14, instance=redpanda-production-cpx41-redpanda-production, node_class=none, node_id=2df69ecf-bd2b-8921-41c9-a1eb5de12057, node_pool=default, node_scheduling_eligibility=eligible, node_status=ready} value=1.050288e+06 ], [ var='B' labels={__name__=nomad_client_uptime, datacenter=fsn1-dc14, instance=redpanda-production-cpx41-redpanda-production, node_class=none, node_id=2df69ecf-bd2b-8921-41c9-a1eb5de12057, node_pool=default, node_scheduling_eligibility=eligible, node_status=ready} value=1.050288e+06 ], [ var='C' labels={__name__=nomad_client_uptime, datacenter=fsn1-dc14, instance=redpanda-production-cpx41-redpanda-production, node_class=none, node_id=2df69ecf-bd2b-8921-41c9-a1eb5de12057, node_pool=default, node_scheduling_eligibility=eligible, node_status=ready} value=0 ]}]" duration=139.787389ms + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=47781053d70514d0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.827380615Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.827174011s EvaluationString:}]" duration=179.524757ms + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=smartrep, service=cdc-srv-mapping" t=2024-05-29T13:44:14.827430458Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.827293431Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=redis-cluster, service=redis-cluster" t=2024-05-29T13:44:14.82718552Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.827077484Z caller=remote_instance_store.go:51 user=523906 slug=cyberark msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=redis, service=redis-a-master" t=2024-05-29T13:44:14.826921999Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=523906 slug=cyberark instance="datasource_uid=d3ce9def-da84-43e4-b1e3-291fdd0dbd7d, ref_id=B" t=2024-05-29T13:44:14.826944286Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=redis, service=redis-a-master" t=2024-05-29T13:44:14.826912212Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=523906 slug=cyberark t=2024-05-29T13:44:14.826901316Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.826713107Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.826697444Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=426229 slug=accelbyte version=290 fingerprint=4bceaf8bdcc81a4a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.826583337Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.826392806s EvaluationString:}]" duration=40.811691ms
+ level=debug ts=2024-05-29T13:44:14.826524281Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.826463497Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.82645711Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.826420246Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.826428068Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.82640984Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.826406394Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.826397459Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.826337676Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.826335876Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.826313055Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=5b7a883fe8332795 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.826266326Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.825970216s EvaluationString:}]" duration=134.744441ms
+ level=debug ts=2024-05-29T13:44:14.826305695Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.826160002Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=payto-srv-selector" t=2024-05-29T13:44:14.826106632Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=payto-srv-selector" t=2024-05-29T13:44:14.826096931Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.825759538Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=payments-srv-core" t=2024-05-29T13:44:14.825306267Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=277807 slug=info96f8 t=2024-05-29T13:44:14.825084423Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.218952ms
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=payconnector-cdc" t=2024-05-29T13:44:14.825175216Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=payconnector-background" t=2024-05-29T13:44:14.825057605Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=payconnector-background" t=2024-05-29T13:44:14.82504601Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.824831656Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.824583901Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:14.824691309Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.824662698Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.824537373Z caller=remote_rule_evaluator.go:193 user=855233 slug=sadeno msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=merchants-srv-core" t=2024-05-29T13:44:14.824606235Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=kratos" t=2024-05-29T13:44:14.824394592Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=201644 slug=thoughtspot instance= t=2024-05-29T13:44:14.824301861Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+ level=debug ts=2024-05-29T13:44:14.824215277Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.823825515Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.823539407Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.823524862Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.historian backend=loki user=438761 slug=wasabicloudprod t=2024-05-29T13:44:14.823475705Z level=debug msg="Done saving alert state history batch"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=external-web-agreements" t=2024-05-29T13:44:14.823217998Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=external-api-paymods" t=2024-05-29T13:44:14.823057655Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Wilmington, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.822996045Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Wichita, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.822832349Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Wellington, country=New Zealand, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.822652713Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.822623642Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.822594916Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.822581484Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.822469726Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.822423997Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=error" t=2024-05-29T13:44:14.822441154Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=error" t=2024-05-29T13:44:14.82242592Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.822196465Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=driver-cdc" t=2024-05-29T13:44:14.822253129Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=driver-cdc" t=2024-05-29T13:44:14.822240555Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Warsaw, country=Poland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.822226923Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.82213821Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Virginia Beach, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.821990783Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Virginia Beach, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.82197864Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.821771173Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vilnius, country=Lithuania, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.82173705Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vienna, country=Austria, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.821562924Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vienna, country=Austria, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.821550986Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.821390079Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.821293616Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vancouver, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.821374781Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vancouver, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.821365734Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.821222074Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.821274337Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.82126015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=core-api-reports" t=2024-05-29T13:44:14.821249703Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Valletta, country=Malta, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.821149145Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=464973 slug=equansdatahub t=2024-05-29T13:44:14.821152133Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=464973 slug=equansdatahub t=2024-05-29T13:44:14.821099585Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=464973 slug=equansdatahub version=1 fingerprint=1f1e16f00bfc3a35 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.821025205Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.82076848s EvaluationString:}]" duration=107.180418ms
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=cdc-srv-mapping" t=2024-05-29T13:44:14.821083592Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.821069546Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance= t=2024-05-29T13:44:14.821059241Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=260796 slug=expressvpn version=33 fingerprint=62f06377a044cbb4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.820969182Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.820737425s EvaluationString:}]" duration=31.513778ms
+ logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:14.82097165Z level=debug msg="Saving alert states" count=10 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_utilization_mean_max, resource.label.database_id=planet-compute-staging:l3h-next" t=2024-05-29T13:44:14.820953393Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=cdc-cloudevents-srv-outbox" t=2024-05-29T13:44:14.820916286Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_utilization_mean_max, resource.label.database_id=planet-compute-staging:next-02-ov2-view" t=2024-05-29T13:44:14.820898668Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_utilization_mean_max, resource.label.database_id=planet-compute-staging:compute-ordersv2-view" t=2024-05-29T13:44:14.820834924Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_utilization_mean_max, resource.label.database_id=planet-compute-staging:next-ov2-alt-02-worker" t=2024-05-29T13:44:14.8207675Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=bpay-srv-mock" t=2024-05-29T13:44:14.820725054Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_utilization_mean_max, resource.label.database_id=planet-compute-staging:next-02-ov2-dflt-01-worker" t=2024-05-29T13:44:14.820675521Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_utilization_mean_max, resource.label.database_id=planet-compute-staging:next-qe-ov2-default-worker" t=2024-05-29T13:44:14.820570028Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=rwdi-cluster" t=2024-05-29T13:44:14.820526849Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vaduz, country=Liechtenstein, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.820521437Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Vaduz, country=Liechtenstein, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.820513229Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6307d18_8abd_4a66_ba16_290280ce890c.slice/cri-containerd-98a42e6381df726c338e82d4ab28b9247e424a8f44f20d19fd2c79c239d7aa49.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=98a42e6381df726c338e82d4ab28b9247e424a8f44f20d19fd2c79c239d7aa49, namespace=workflow, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=workflow-worker-9bb4cc958-4845b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.820421756Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=nc-services-prod" t=2024-05-29T13:44:14.820403625Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Ulaanbaatar, country=Mongolia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.820346817Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Ulaanbaatar, country=Mongolia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.820334939Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=nc-gitlab-testing" t=2024-05-29T13:44:14.820256766Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.82030345Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.820286061Z caller=remote_instance_store.go:51 user=543604 slug=kingmakers msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=tailscale, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db1f07b_50cf_4a21_afc6_45a427742b30.slice/cri-containerd-e20985d2b48ad76c11060b978538605d33716bb284f0f971a077c54233197e0c.scope, image=ghcr.io/tailscale/tailscale:latest, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e20985d2b48ad76c11060b978538605d33716bb284f0f971a077c54233197e0c, namespace=tailscale, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=subnet-router-5d646f69d9-wbxr4, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.820189245Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Trenton, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.820130701Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.820157359Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=paypaplane, service=agreements-srv-core" t=2024-05-29T13:44:14.8201258Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.820049839Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=frl" t=2024-05-29T13:44:14.820028621Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=opencost, service=opencost" t=2024-05-29T13:44:14.819964987Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Trenton, country=United States, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.819935356Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=monitoring, service=prometheus-prometheus-node-exporter" t=2024-05-29T13:44:14.819786868Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=external-korea-central" t=2024-05-29T13:44:14.819696044Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Toronto, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.819669083Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=rabbitmq, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod1e30b8c9_f244_4d5f_8d50_effe58140f3d.slice/cri-containerd-b180f07b25be350ed099840bd3bf84ad82652039e126dc75f283cc2ba2615b3e.scope, image=docker.io/bitnami/rabbitmq:3.8.35-debian-11-r5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b180f07b25be350ed099840bd3bf84ad82652039e126dc75f283cc2ba2615b3e, namespace=rabbitmq, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=rabbitmq-ha-2, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.819647998Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=external-eu-west-new" t=2024-05-29T13:44:14.819573182Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.819555062Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.819452813Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=347171 slug=neuralconcept instance="cluster=external-eu-west" t=2024-05-29T13:44:14.81940275Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tokyo, country=Japan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.819503383Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tokyo, country=Japan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.819495396Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tokyo, country=Japan, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.819337521Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=manager, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b46ffb5_37b7_4a9e_a52a_57913337e16e.slice/cri-containerd-f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db.scope, image=ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:0.91.0, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db, namespace=observe, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=otel-opentelemetry-operator-7456d44f8d-lkfzr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.819348211Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.819304576Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=347171 slug=neuralconcept t=2024-05-29T13:44:14.819231033Z level=debug msg="State manager processing evaluation results" resultCount=10
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=monitoring, service=prometheus-grafana" t=2024-05-29T13:44:14.819290643Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.819147975Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.819112531Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.819100755Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=monitoring, service=alertmanager-prometheus-kube-prometheus-alertmanager" t=2024-05-29T13:44:14.819107505Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.819044873Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=monitoring, service=agent-kube-state-metrics" t=2024-05-29T13:44:14.818914026Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.818785614Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Tallinn, country=Estonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.81877181Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.818738653Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.818629602Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.818524465Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Sydney, country=Australia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.818444328Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.818121278Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=20946 slug=extole t=2024-05-29T13:44:14.818105413Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=20946 slug=extole instance= t=2024-05-29T13:44:14.818078559Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=20946 slug=extole t=2024-05-29T13:44:14.818039892Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Sofia, country=Bulgaria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.817879504Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Sofia, country=Bulgaria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.817874138Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Skopje, country=North Macedonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.817766488Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=linkerd, service=linkerd-proxy-injector" t=2024-05-29T13:44:14.817680711Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.817468124Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:14.817508208Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.817453846Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=396586 slug=opengov t=2024-05-29T13:44:14.81741957Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=manager, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b46ffb5_37b7_4a9e_a52a_57913337e16e.slice/cri-containerd-f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db.scope, image=ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:0.91.0, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db, namespace=observe, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=otel-opentelemetry-operator-7456d44f8d-lkfzr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.81743884Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Singapore, country=Singapore, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.817434683Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Singapore, country=Singapore, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.817425143Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.817351392Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Silicon Valley, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.81727802Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=linkerd, service=linkerd-destination" t=2024-05-29T13:44:14.817312048Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=linkerd, service=linkerd-destination" t=2024-05-29T13:44:14.81729728Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b46ffb5_37b7_4a9e_a52a_57913337e16e.slice/cri-containerd-18349235bc85cbbf9346657fc8e4a5a32e0aa4e3987f9e1a2248ff566ecea19d.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=18349235bc85cbbf9346657fc8e4a5a32e0aa4e3987f9e1a2248ff566ecea19d, namespace=observe, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=otel-opentelemetry-operator-7456d44f8d-lkfzr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.817150278Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=keycloak, service=keycloak" t=2024-05-29T13:44:14.817121558Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.816420176Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.816910981Z caller=remote_instance_store.go:51 user=277807 slug=info96f8 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=277807 slug=info96f8 instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:14.816820374Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76b921d0_efc1_4e85_b6e7_12993f701979.slice/cri-containerd-400cb893bbf0fbee0a4cc786c4b239ddd5ee24419e4a928ba37bfe66e325c229.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=400cb893bbf0fbee0a4cc786c4b239ddd5ee24419e4a928ba37bfe66e325c229, namespace=observe, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=otel-collector-collector-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.816870978Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.816625226Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Sarajevo, country=Bosnia and Herzegovina, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.816573561Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6584c0b5_74d5_4abe_8c0f_293aca4b2149.slice/cri-containerd-4a09f0df5ee8203e64d17bb91caf295f6bdbd32780a41da2faf71624358deacb.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=4a09f0df5ee8203e64d17bb91caf295f6bdbd32780a41da2faf71624358deacb, namespace=monitoring, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=prometheus-operator-69567d78df-g7nst, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.816576729Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Sao Paulo, country=Brazil, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.816400033Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.816374004Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=payconnector-cdc" t=2024-05-29T13:44:14.816353011Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.816313529Z caller=remote_alert_sender.go:94 user=190917 slug=d1cx host=d1cx-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.156.48:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=df63dc18-8826-474a-8b1a-f3cd2346eca3 alerts=1
+ logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:14.816253474Z level=debug msg="Done saving alert state history batch"
+ logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:14.816228002Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=27.887818ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=prometheus, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26d839e3_b210_429d_a824_e2a28e7c64f5.slice/cri-containerd-f9f96230cddc8b1c73cec0f5f1cc69b41cc70dd107f3520959a482f20e47e9a1.scope, image=docker.io/victoriametrics/vmagent:v1.99.0, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f9f96230cddc8b1c73cec0f5f1cc69b41cc70dd107f3520959a482f20e47e9a1, namespace=monitoring, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.816234796Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=payconnector" t=2024-05-29T13:44:14.816011462Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.815962581Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:14.815909111Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=798556 slug=zenithhr t=2024-05-29T13:44:14.815913744Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.175023ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26d839e3_b210_429d_a824_e2a28e7c64f5.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.815981605Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Salt Lake City, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.815945663Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Salt Lake City, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.815935214Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Riyadh, country=Saudi Arabia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.8157961Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.815657374Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=metadata" t=2024-05-29T13:44:14.815791576Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=kube-rbac-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffb28dbf_f091_4074_9994_3356987e91e8.slice/cri-containerd-b7b237fcc3867957940825b795f8d149851cc4841741b4a4314c577c54392f64.scope, image=quay.io/brancz/kube-rbac-proxy:v0.14.2, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b7b237fcc3867957940825b795f8d149851cc4841741b4a4314c577c54392f64, namespace=monitoring, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=node-exporter-hnpnr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.815700017Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=528849 slug=bitvavo instance= t=2024-05-29T13:44:14.815606937Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=528849 slug=bitvavo version=5 fingerprint=5bb4ea9ca9595d14 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.815433683Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.815145861s EvaluationString:}]" duration=32.294078ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Riga, country=Latvia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.81552229Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=JOHANNESBURG Query" t=2024-05-29T13:44:14.815388685Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Reykjavik, country=Iceland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.815360982Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Reykjavik, country=Iceland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.815353378Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.815327805Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.815223197Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.81515576Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c04a533_f29a_4351_8e0a_189f185a8bc4.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=node-exporter-v9jdj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.815171351Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.815054561Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.814914916Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=id" t=2024-05-29T13:44:14.814877493Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.814821946Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.814776433Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=kube-rbac-proxy-self, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e607dcb_2812_49cb_99dd_006bc38c3a81.slice/cri-containerd-32be4d5dd06d0be1a58f5bb30196f25f787b808056edd55a242392020d928c9e.scope, image=quay.io/brancz/kube-rbac-proxy:v0.14.2, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=32be4d5dd06d0be1a58f5bb30196f25f787b808056edd55a242392020d928c9e, namespace=monitoring, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=kube-state-metrics-56d4ff59cd-47vwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.814673419Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Portland - Oregon, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.814470527Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e607dcb_2812_49cb_99dd_006bc38c3a81.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=kube-state-metrics-56d4ff59cd-47vwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.814367121Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.814314844Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.840852ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e607dcb_2812_49cb_99dd_006bc38c3a81.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=kube-state-metrics-56d4ff59cd-47vwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.814344498Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=driver-background" t=2024-05-29T13:44:14.814137906Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.814069095Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.813889408Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=core-srv-payconnector-bridge" t=2024-05-29T13:44:14.813770637Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.81368944Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=848777 slug=opsalert instance="__name__=probe_http_status_code, config_version=1715334496899256064, instance=https://sxfmtaas.dryice-aws.com/, job=SX-MTaaS Prod XSMF, probe=Mumbai" t=2024-05-29T13:44:14.813682584Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.813599576Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=848777 slug=opsalert instance="__name__=probe_http_status_code, config_version=1715334496899256064, instance=https://sxfmtaas.dryice-aws.com/, job=SX-MTaaS Prod XSMF, probe=Frankfurt" t=2024-05-29T13:44:14.813540939Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.813529744Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Phoenix, country=United States, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.813555861Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Phoenix, country=United States, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.813530474Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.813454255Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c33de23_7420_4fdc_9c7f_f731a2c3587c.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=node-exporter-zrgm5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.813446354Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.813338714Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=cdc-srv-mapping" t=2024-05-29T13:44:14.813318893Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=insignia, service=cdc-srv-mapping" t=2024-05-29T13:44:14.813301683Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.811738003Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.813179911Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.812814342Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.812965848Z caller=remote_alert_sender.go:94 user=538037 slug=drivewealth host=drivewealth-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.117.25:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=admiogdgg68e8f alerts=1
+ logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:14.812830389Z level=debug msg="Alert state changed creating annotation" newState="Normal (MissingSeries)" oldState=Pending
+ logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:14.812811288Z level=debug msg="Alert state changed creating annotation" newState=Pending oldState=Normal
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.812872787Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.080221ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=node-exporter, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97a5a51c_9bd3_4844_b2bf_06e2cc95290c.slice/cri-containerd-6db50a7032c08669383855581827764516b4701891ec4719d4f2d69268bec591.scope, image=quay.io/prometheus/node-exporter:v1.6.1, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6db50a7032c08669383855581827764516b4701891ec4719d4f2d69268bec591, namespace=monitoring, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=node-exporter-vw2b8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.812877359Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.812846339Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.812743037Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Paris, country=France, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.81276457Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.812605215Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.812736073Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.812674015Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.812669783Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=251760 slug=forgerock t=2024-05-29T13:44:14.812656199Z level=debug msg="Deleting alert states" count=1
+ level=debug ts=2024-05-29T13:44:14.812673661Z caller=remote_instance_store.go:57 user=251760 slug=forgerock msg="calling DeleteAlertInstances - not implemented"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.812617206Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.812573216Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.812608839Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=grafana, service=grafana" t=2024-05-29T13:44:14.81258143Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.812500176Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.050886ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.812446626Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:14.812454296Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:14.812347649Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Ontario, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.812325475Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=mut-f92cf6681a" t=2024-05-29T13:44:14.812170156Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=432323 slug=lithic version=10 fingerprint=bc715f518bfa4cfe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.812062304Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.811722131s EvaluationString:}]" duration=18.011575ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Ontario, country=Canada, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.812107101Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Ontario, country=Canada, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.812098502Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank-zammad, service=zammad-memcached" t=2024-05-29T13:44:14.812087759Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=mut-0348a8026c" t=2024-05-29T13:44:14.812073307Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank-zammad, service=zammad-memcached" t=2024-05-29T13:44:14.8120741Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.811970158Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=gov-rc-13may24" t=2024-05-29T13:44:14.811965746Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=danny-temp-02" t=2024-05-29T13:44:14.811889103Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=acunetix360-target" t=2024-05-29T13:44:14.811813193Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank-zammad, service=zammad" t=2024-05-29T13:44:14.811749913Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16355c00_d4f6_43b9_8eec_c62321a78d1a.slice/cri-containerd-0ebb75b73e284e27e60c81c299733616ad5b5404bbab7de864c50bc4c7fc4c3b.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0ebb75b73e284e27e60c81c299733616ad5b5404bbab7de864c50bc4c7fc4c3b, namespace=monitoring, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=node-exporter-hqztq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.811760041Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Oklahoma City, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.81174359Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=url-shortener" t=2024-05-29T13:44:14.811555938Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.811421374Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=transactions-srv-core" t=2024-05-29T13:44:14.811381571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16355c00_d4f6_43b9_8eec_c62321a78d1a.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=node-exporter-hqztq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.811390865Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16355c00_d4f6_43b9_8eec_c62321a78d1a.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=node-exporter-hqztq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.811297652Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.811083707Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.811060072Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=New Orleans, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.810925353Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=node-exporter, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0515f15_1a2c_4850_b899_12a2457adb74.slice/cri-containerd-454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f.scope, image=quay.io/prometheus/node-exporter:v1.6.1, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f, namespace=monitoring, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=node-exporter-r7j99, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.810930789Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.810897058Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.810665047Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0515f15_1a2c_4850_b899_12a2457adb74.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=monitoring, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=node-exporter-r7j99, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.810651937Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=payto-api-core" t=2024-05-29T13:44:14.810530684Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.810458448Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=paymods-srv-master" t=2024-05-29T13:44:14.81030831Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=844031 slug=dzomysql t=2024-05-29T13:44:14.810148586Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Montreal, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.810226263Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Montreal, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.810213548Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.810104485Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=844031 slug=dzomysql version=11 fingerprint=a07ed0b336407c19 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.809944341Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=web-01, groupname=apache2, instance=web-01:9090, job=integrations/process_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=web-01, groupname=apache2, instance=web-01:9090, job=integrations/process_exporter Value:0xc0108dc250} C:{Var:C Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=web-01, groupname=apache2, instance=web-01:9090, job=integrations/process_exporter Value:0xc0108dc2b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.809466617s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=web-01, groupname=apache2, instance=web-01:9090, job=integrations/process_exporter} value=96 ], [ var='C' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=web-01, groupname=apache2, instance=web-01:9090, job=integrations/process_exporter} value=0 ]}]" duration=10.318459ms
+ logger=ngalert.state.manager.persist
user=343338 slug=f5sdc t=2024-05-29T13:44:14.810109029Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm" t=2024-05-29T13:44:14.810079776Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc t=2024-05-29T13:44:14.810031772Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=payments-srv-core" t=2024-05-29T13:44:14.810054394Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=75789 slug=mysign t=2024-05-29T13:44:14.810036455Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Montevideo, country=Uruguay, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.810021642Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=343338 slug=f5sdc version=100 fingerprint=351772dd042bcdde attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.809930391Z level=debug msg="Alert rule evaluated" results="[{Instance:datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm Value:0xc01ef86a28} B:{Var:B Labels:datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm Value:0xc01ef86a90} C:{Var:C Labels:datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm Value:0xc01ef86ae0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.809583742s EvaluationString:[ var='A' labels={datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm} value=41 ], [ var='B' labels={datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm} value=41 ], [ var='C' labels={datacenter=pa2.par, group=monitoring, instance=mon-pci.pa2.par, origin=volterra-infra-vm} value=0 ]}]" duration=266.241168ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.809994227Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Monaco, country=Monaco, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.809856404Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=843304 slug=ppcgroup t=2024-05-29T13:44:14.8097934Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.809819935Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Minneapolis, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.809677286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=payconnector-background" t=2024-05-29T13:44:14.8095417Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Milwaukee, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.809519581Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.809479823Z caller=remote_instance_store.go:51 user=23997 slug=wheniwork msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.809380188Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=payconnector" t=2024-05-29T13:44:14.80934156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Milano, country=Italy, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.809349295Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Milano, country=Italy, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.809341049Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.809157552Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.809135286Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.809068569Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.809048987Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.808926086Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=merchants-srv-core" t=2024-05-29T13:44:14.809023334Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=merchants-srv-core" t=2024-05-29T13:44:14.8090124Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.80886076Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.80890763Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=kratos" t=2024-05-29T13:44:14.808808156Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.808788467Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Melbourne, country=Australia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.808648924Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.808598101Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=identity-srv-core" t=2024-05-29T13:44:14.808451006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=63699 slug=bizzydist t=2024-05-29T13:44:14.808265694Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=63699 slug=bizzydist instance= t=2024-05-29T13:44:14.808254624Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=63699 slug=bizzydist instance= t=2024-05-29T13:44:14.808247731Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Manchester, country=United Kingdom, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.80829045Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=id" t=2024-05-29T13:44:14.808279956Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=promtail, ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f2da93b_49eb_47d5_8546_0f6a542a832c.slice/cri-containerd-b10b3a15b798bb39e1debbe9b760520196a59645e1b8d8d438d54e89adf93f99.scope, image=docker.io/grafana/promtail:2.9.3, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b10b3a15b798bb39e1debbe9b760520196a59645e1b8d8d438d54e89adf93f99, namespace=loki, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=promtail-ggsl4, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.80820809Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.808082918Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8c4bd07_f40c_4dcc_a4cc_9a9a7fade41e.slice/cri-containerd-eafdfef0d67376b0a6c09099972364904047f5e02204d6c8236f77b650628966.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=eafdfef0d67376b0a6c09099972364904047f5e02204d6c8236f77b650628966, namespace=loki, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=promtail-h99dd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.807971997Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Macau, country=Macao, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.807944496Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.807877958Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=external-web-receiver" t=2024-05-29T13:44:14.807877586Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=external-web-agreements" t=2024-05-29T13:44:14.807704592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=external-web-agreements" t=2024-05-29T13:44:14.807688781Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.807555853Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.807532031Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba150ab2_470d_4f0f_92b0_ca8f4d7cbd67.slice/cri-containerd-a9153baa6f8d350ec44602a6eb825b472fcb6ad603ec1a767d2e46710012af19.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9153baa6f8d350ec44602a6eb825b472fcb6ad603ec1a767d2e46710012af19, namespace=loki, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=promtail-2dxqd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.80750236Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba150ab2_470d_4f0f_92b0_ca8f4d7cbd67.slice/cri-containerd-a9153baa6f8d350ec44602a6eb825b472fcb6ad603ec1a767d2e46710012af19.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9153baa6f8d350ec44602a6eb825b472fcb6ad603ec1a767d2e46710012af19, namespace=loki, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=promtail-2dxqd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.807476164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.807376688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=external-api-merchants" t=2024-05-29T13:44:14.807350879Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.807348805Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=external-api-agreements" t=2024-05-29T13:44:14.807162585Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.807007935Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=London, country=United Kingdom, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.807006128Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.806819896Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=driver-cdc" t=2024-05-29T13:44:14.806791886Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.806758338Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.806687733Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.806625836Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.806561014Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.806470332Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b0a3470_6b39_4d54_b6b3_5cfc1aaf6b58.slice/cri-containerd-aaf5ee01df212b2b47fcc681fbd10e05908bee8789d15b68fa80cd82b0e2539d.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=aaf5ee01df212b2b47fcc681fbd10e05908bee8789d15b68fa80cd82b0e2539d, namespace=linkerd-smi, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=smi-adaptor-86d9d64855-drs5t, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.806462603Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.806471975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=driver" t=2024-05-29T13:44:14.806388819Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=cuscal-authorise-api" t=2024-05-29T13:44:14.806191862Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle 
instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5564346_3913_428d_9fe6_08dcaa3bfd39.slice/cri-containerd-88be87ceb1538049421d57dfcb5b8aa2ebac5596cebecaa4731f574bc4bfc04a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=88be87ceb1538049421d57dfcb5b8aa2ebac5596cebecaa4731f574bc4bfc04a, namespace=linkerd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=linkerd-proxy-injector-956d7bd89-9kpsn, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.80619852Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.806151724Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.805802903Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.806081699Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.80601426Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=01230fc0cdda2a18 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.805917608Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.80570009s EvaluationString:}]" duration=205.345218ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.80595996Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.805640626Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=core-api-reports" t=2024-05-29T13:44:14.805603559Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.805566275Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.805518768Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.805549608Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=cdc-srv-mapping" t=2024-05-29T13:44:14.805441522Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=commbank, service=cdc-srv-mapping" t=2024-05-29T13:44:14.80542908Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.805351141Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:14.805205215Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.73674ms + level=debug ts=2024-05-29T13:44:14.80515329Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Kansas City, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.805229424Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05cc212e_8217_433b_ac8f_8d49b6a9bb28.slice/cri-containerd-2bdd5ed5f5af3f84c22e3c562c90910af226b8a9a1b9b4cf488e0e713972fc68.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2bdd5ed5f5af3f84c22e3c562c90910af226b8a9a1b9b4cf488e0e713972fc68, namespace=linkerd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=linkerd-destination-9cd56bd6-v8gnj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.805175702Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.805059038Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.804991035Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=destination, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05cc212e_8217_433b_ac8f_8d49b6a9bb28.slice/cri-containerd-17e8e9dd8ede22e66b9160aa1e5d89405032957afcbb7f243faa7cef99d13222.scope, image=cr.l5d.io/linkerd/controller:stable-2.14.4, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=17e8e9dd8ede22e66b9160aa1e5d89405032957afcbb7f243faa7cef99d13222, namespace=linkerd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=linkerd-destination-9cd56bd6-v8gnj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.804923831Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:14.804808008Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.212035ms + level=debug ts=2024-05-29T13:44:14.804714051Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.804829216Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=798556 slug=zenithhr t=2024-05-29T13:44:14.804659039Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=chaos-mesh, service=chaos-dns-server" t=2024-05-29T13:44:14.80467479Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podadb64238_66b2_4a63_b223_5440f443c498.slice/cri-containerd-0977455ff328ba353da002b79c7cfbeb22812d09fe34b9f6312abb00be6b7b1e.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0977455ff328ba353da002b79c7cfbeb22812d09fe34b9f6312abb00be6b7b1e, namespace=linkerd, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=linkerd-proxy-injector-956d7bd89-fj248, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.804672049Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.804597819Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=spectrum-cable, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.804631757Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.804526719Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=chaos-mesh, service=chaos-dashboard" t=2024-05-29T13:44:14.804510729Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane 
instance="environment=staging, namespace=chaos-mesh, service=chaos-dashboard" t=2024-05-29T13:44:14.804500996Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.804380523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=staging, namespace=chaos-mesh, service=chaos-daemon" t=2024-05-29T13:44:14.804350498Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.804193359Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishget, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.804109639Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishget, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.804095638Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Houston, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.804021596Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane t=2024-05-29T13:44:14.803746977Z level=debug msg="State manager processing evaluation results" resultCount=164 + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.803873875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.803856557Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, exported_endpoint=branch_prediction" t=2024-05-29T13:44:14.803773223Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.80381452Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.803659077Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.803425009Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.803254229Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dd4695f_6f6a_426c_8a7e_ba329dba0749.slice/cri-containerd-e1bb2ef2d9b05335e53003cc27e40e8d0bcb7cedf444ec586f2559a906b5a2de.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e1bb2ef2d9b05335e53003cc27e40e8d0bcb7cedf444ec586f2559a906b5a2de, namespace=linkerd, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=linkerd-destination-9cd56bd6-6v72v, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.803245023Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=451750 slug=amadeuspfpprod t=2024-05-29T13:44:14.803141469Z level=debug msg="Saving alert states" count=12 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.803053519Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.802982733Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.803051343Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="device=sda, instance=mpf-ein-node3:9100, job=integrations/node_exporter" t=2024-05-29T13:44:14.802924849Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.802852185Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="device=sda, instance=NGINX-TNG:9100, job=integrations/node_exporter" t=2024-05-29T13:44:14.802767778Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=destination, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dd4695f_6f6a_426c_8a7e_ba329dba0749.slice/cri-containerd-91b310a68e1b00647f59498be471917a2187c5a73d8fa357231d7a35c698add1.scope, image=cr.l5d.io/linkerd/controller:stable-2.14.4, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=91b310a68e1b00647f59498be471917a2187c5a73d8fa357231d7a35c698add1, namespace=linkerd, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=linkerd-destination-9cd56bd6-6v72v, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.802734691Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Frankfurt, country=Germany, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.802719381Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=destination, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dd4695f_6f6a_426c_8a7e_ba329dba0749.slice/cri-containerd-91b310a68e1b00647f59498be471917a2187c5a73d8fa357231d7a35c698add1.scope, image=cr.l5d.io/linkerd/controller:stable-2.14.4, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=91b310a68e1b00647f59498be471917a2187c5a73d8fa357231d7a35c698add1, namespace=linkerd, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=linkerd-destination-9cd56bd6-6v72v, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.80268671Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.802602407Z caller=remote_instance_store.go:51 user=49546 slug=nulogyinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="device=dm-0, instance=mpf-ein-node3:9100, job=integrations/node_exporter" t=2024-05-29T13:44:14.802543496Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=49546 slug=nulogyinfra t=2024-05-29T13:44:14.802558167Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:14.802545138Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:14.802533825Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="agent_hostname=proxy03, device=sdb, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:14.802443683Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.802420197Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.802300469Z 
caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="agent_hostname=proxy03, device=sda, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:14.802271716Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Dublin, country=Ireland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.802185389Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Dublin, country=Ireland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.802160551Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=sp-validator, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8dd4695f_6f6a_426c_8a7e_ba329dba0749.slice/cri-containerd-32282388707bef1250bb5a4dbf9941d4e2ffb4b1ad2176fffa9d254613913100.scope, image=cr.l5d.io/linkerd/controller:stable-2.14.4, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=32282388707bef1250bb5a4dbf9941d4e2ffb4b1ad2176fffa9d254613913100, namespace=linkerd, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=linkerd-destination-9cd56bd6-6v72v, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.802141801Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="agent_hostname=proxy01-1, device=dm-0, instance=proxy01-1, job=integrations/node_exporter" t=2024-05-29T13:44:14.801915173Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb710b10d_ae5c_4d10_87e7_1dfefbe6cdcd.slice/cri-containerd-8d5bc2ddbee2a1cf76548be7d0046882dc140c3a74b7fb59587c9229d1a16c1a.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8d5bc2ddbee2a1cf76548be7d0046882dc140c3a74b7fb59587c9229d1a16c1a, namespace=linkerd, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=linkerd-identity-586f5f94b5-jchc9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.801843026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Doha, country=Qatar, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.801817071Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.801786554Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Dhaka, country=Bangladesh, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.80166179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="agent_hostname=mpf-ein-fw-master, device=dm-0, instance=mpf-ein-fw-master, job=integrations/node_exporter" t=2024-05-29T13:44:14.80156355Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.801459376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.801454419Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Detroit, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.801464552Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.801436531Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:14.801338665Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=19.932453ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Des Moines, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.801281857Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=451750 slug=amadeuspfpprod t=2024-05-29T13:44:14.801045301Z level=debug msg="State manager processing evaluation results" resultCount=12 + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.801024903Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.801021544Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.800974906Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.800991433Z caller=remote_instance_store.go:51 user=884866 slug=cnonumerique msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.800936321Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Dallas, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.80086915Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.800824023Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.800699975Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=ddhkbrfewv7k0d, ref_id=A" t=2024-05-29T13:44:14.800680237Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=ddhkbrfewv7k0d, ref_id=A" t=2024-05-29T13:44:14.800461952Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.800487637Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Concord, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.800344291Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.800333938Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.800307146Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=884866 slug=cnonumerique t=2024-05-29T13:44:14.800244888Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.800134207Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.800133788Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Columbus, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.800133807Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.800107204Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=fe98eaba-ee1b-4198-8ef3-9181223fbc0d, ref_id=A" t=2024-05-29T13:44:14.800097387Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.scheduler user=679831 slug=joveostageaws version=12970 fingerprint=805be576a58b4d09 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.800017108Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fe98eaba-ee1b-4198-8ef3-9181223fbc0d, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.799525463s EvaluationString:}]" duration=53.738929ms
+ logger=ngalert.scheduler user=884866 slug=cnonumerique version=27 fingerprint=a1c5f4ae41c0af06 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.799679107Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddhkbrfewv7k0d, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.799276072s EvaluationString:}]" duration=31.7285ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Columbia, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.799958066Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.799934876Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.799876346Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799841954Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799827913Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:14.799720157Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.074856ms
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.79977386Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.161137ms
+ level=debug ts=2024-05-29T13:44:14.799605006Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799642159Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799559469Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799497132Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799500205Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.79958812Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Chisinau, country=Moldova, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.79961723Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.799494617Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:14.799585998Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=320778 slug=omegaai t=2024-05-29T13:44:14.799500324Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.799185514Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799006761Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.799078878Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Charlotte, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.799050608Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Charlotte, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.799038832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.798775767Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:14.798716127Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.798722504Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=538037 slug=drivewealth version=3 fingerprint=f2e883971f9688e7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.798644799Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=_h27TffVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.798277129s EvaluationString:}]" duration=23.89227ms
+ level=debug ts=2024-05-29T13:44:14.798630236Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Caracas, country=Venezuela, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.798618979Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=177453 slug=clabs t=2024-05-29T13:44:14.798579196Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=168.730617ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.798552555Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager.persist user=63699 slug=bizzydist t=2024-05-29T13:44:14.798449967Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.443928ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Cairo, country=Egypt, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.79838653Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.798409289Z caller=remote_alert_sender.go:94 user=233137 slug=mirrornode host=mirrornode-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.234.88:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdk8balhgvbi8b alerts=1
+ level=debug ts=2024-05-29T13:44:14.798365928Z caller=remote_instance_store.go:51 user=516446 slug=awarehqdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.798344286Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.798148166Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=115.402308ms
+ logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:14.798276425Z level=debug msg="Deleting alert states" count=1
+ logger=ngalert.state.manager user=516446 slug=awarehqdev t=2024-05-29T13:44:14.798268125Z level=info msg="Detected stale state entry" cacheID="[[\"EndpointName\",\"v3-toxicspeech-eng\"],[\"Series\",\"query7ff2a5a3f7ed4b8981eea9e56c63e6a2\"],[\"__alert_rule_namespace_uid__\",\"D-8RyMx4z\"],[\"__alert_rule_uid__\",\"2LufIfx4zm\"],[\"alertname\",\"v3-toxicspeech-eng-no-invocations\"],[\"grafana_folder\",\"bi\"],[\"group\",\"SageMakerNoInvocations\"],[\"route\",\"team=bi\"],[\"team\",\"bi\"]]" state=Pending reason=
+ logger=ngalert.state.manager user=516446 slug=awarehqdev instance="EndpointName=v3-toxicspeech-eng, Series=query4ee3335941414c8e9978805e6cb8f0ec" t=2024-05-29T13:44:14.798252025Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.798229073Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ Error parsing panelUID for alert annotationruleID199dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=27998 slug=korob t=2024-05-29T13:44:14.798114439Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=102.856385ms
+ level=debug ts=2024-05-29T13:44:14.797960467Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.797630653Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.797426234Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Brussels, country=Belgium, environment=production, role=streaming-optimized, service_name=zam" t=2024-05-29T13:44:14.797427632Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.797406177Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.797395377Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.797356225Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.79733385Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.797240094Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=430961 slug=solifi version=8 fingerprint=b615e9b735a4d1bf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.797287007Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.797034085s EvaluationString:}]" duration=118.434016ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.797264093Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.797106972Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=identity, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb644df9e_7a42_49c9_9d79_c81de2ba6e14.slice/cri-containerd-312acd816f6341289a905b5553bff671acc89b525e6466fe4c5aec577fc012da.scope, image=cr.l5d.io/linkerd/controller:stable-2.14.4, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=312acd816f6341289a905b5553bff671acc89b525e6466fe4c5aec577fc012da, namespace=linkerd, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=linkerd-identity-586f5f94b5-vxh2l, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.796949874Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.796897099Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.796895097Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.796866311Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.796800149Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.796708398Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Boston, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.79672181Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.796663974Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.796643733Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Boise, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.79655797Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:14.796431304Z level=debug msg="Saving alert states" count=25 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=SupplierApi" t=2024-05-29T13:44:14.796354339Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=RestaurantpresentationApi" t=2024-05-29T13:44:14.796198965Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=ProductApi" t=2024-05-29T13:44:14.796178417Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=ProductApi" t=2024-05-29T13:44:14.796167126Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance= t=2024-05-29T13:44:14.796185958Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=PrimaryApi" t=2024-05-29T13:44:14.796147339Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Birmingham, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.796113868Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=PrimaryApi" t=2024-05-29T13:44:14.796131985Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=PersonalizationV2Api" t=2024-05-29T13:44:14.796093494Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=MediaApi" t=2024-05-29T13:44:14.795987984Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=MediaAlbumApi" t=2024-05-29T13:44:14.795946836Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Billings, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.795980863Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=MediaAlbumApi" t=2024-05-29T13:44:14.795933951Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.795774812Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.795563046Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.795484795Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.795529678Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.795543733Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.795500897Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.795420597Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.795409162Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.795406445Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:14.79535114Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=LocationSelectionApi" t=2024-05-29T13:44:14.795261842Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=LocationSelectionApi" t=2024-05-29T13:44:14.795255719Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=LocationRankingApi" t=2024-05-29T13:44:14.795234966Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.795186962Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.794962711Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=HealthsafetyApi" t=2024-05-29T13:44:14.79489446Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=HealthsafetyApi" t=2024-05-29T13:44:14.794855338Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=ExpShelvesApi" t=2024-05-29T13:44:14.794818523Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=AttractionsApi" t=2024-05-29T13:44:14.79479718Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=AttractionsApi" t=2024-05-29T13:44:14.794785639Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance="service=AnswersV2Api" t=2024-05-29T13:44:14.794732012Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor t=2024-05-29T13:44:14.794675798Z level=debug msg="State manager processing evaluation results" resultCount=25
+ logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:14.793923352Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.794483062Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.794348781Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.794279669Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.794173497Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.794170188Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63507014_2cf9_4f63_974e_2faa9c8e49c0.slice/cri-containerd-feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2, namespace=kube-system, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=kube-proxy-7fckc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.794154707Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.794085974Z level=debug msg="State manager processing evaluation results" resultCount=1
+ Error parsing panelUID for alert annotationruleID1980dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=698963 slug=lemonade version=1 fingerprint=0e68ea74872ff62d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.793964514Z level=debug msg="Alert rule evaluated" results="[{Instance:app=ds-squeezer, cluster=lmnd-staging-us-east-1, container=kube-state-metrics, deployment=ds-squeezer, endpoint=http, instance=10.24.74.71:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-t4kxx, region=us-east-1, service=kube-state-metrics, stage=staging State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=ds-squeezer, cluster=lmnd-staging-us-east-1, container=kube-state-metrics, deployment=ds-squeezer, endpoint=http, instance=10.24.74.71:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-t4kxx, region=us-east-1, service=kube-state-metrics, stage=staging Value:0xc02a5823b0} THRESHOLD:{Var:THRESHOLD Labels:app=ds-squeezer, cluster=lmnd-staging-us-east-1, container=kube-state-metrics, deployment=ds-squeezer, endpoint=http, instance=10.24.74.71:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-t4kxx, region=us-east-1, service=kube-state-metrics, stage=staging Value:0xc02a582570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.793522289s EvaluationString:[ var='QUERY' labels={app=ds-squeezer, cluster=lmnd-staging-us-east-1, container=kube-state-metrics, deployment=ds-squeezer, endpoint=http, instance=10.24.74.71:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-t4kxx, region=us-east-1, service=kube-state-metrics, stage=staging} value=0 ], [ var='THRESHOLD' labels={app=ds-squeezer, cluster=lmnd-staging-us-east-1, container=kube-state-metrics, deployment=ds-squeezer, endpoint=http, instance=10.24.74.71:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-t4kxx, region=us-east-1, service=kube-state-metrics, stage=staging} value=0 ]}]" duration=52.658144ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.794099852Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:14.794105501Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.793948143Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=95e8c9da512e1f74 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.794004261Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.793683586s EvaluationString:}]" duration=38.784152ms
+ level=debug ts=2024-05-29T13:44:14.79391278Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.793754856Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45d627a3_0e2b_4119_b44f_69cf910d549c.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=eks-pod-identity-agent-9rvzc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.793749486Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.793687063Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.793660551Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.793576877Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:14.793510236Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=efs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e5b6bdb_cffd_408c_b89a_0a3508f65247.slice/cri-containerd-885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-efs-csi-driver:v1.7.6, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633, namespace=kube-system, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=efs-csi-node-26k6x, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.793623764Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.793558584Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=69.068441ms
+ logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=a2eeaa1d45cfddc2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.793419001Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.793078272s EvaluationString:}]" duration=209.752241ms
+ level=debug ts=2024-05-29T13:44:14.793511594Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Albuquerque, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.793462528Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=node-driver-registrar, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod261bd556_7209_4e3a_a00a_8ddf0a38c5e9.slice/cri-containerd-03cfde126b91e33a6bc675e2a8f595842ddfa16d5db57f3f2ef3dc7a76c67e4a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-node-driver-registrar:v2.10.0-eks-1-29-7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=03cfde126b91e33a6bc675e2a8f595842ddfa16d5db57f3f2ef3dc7a76c67e4a, namespace=kube-system, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ebs-csi-node-fpgv7, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.793456707Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.79344336Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod261bd556_7209_4e3a_a00a_8ddf0a38c5e9.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ebs-csi-node-fpgv7, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.793291034Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=240982 slug=intradocc version=3 fingerprint=9eb55798eb5296a8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.793146594Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.792832637s EvaluationString:}]" duration=26.417023ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Abuja, country=Nigeria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.793053436Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=csi-snapshotter, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d628fe2_e5c7_4b13_a130_9ab0aef5a5c0.slice/cri-containerd-a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-snapshotter:v7.0.1-eks-1-29-7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc, namespace=kube-system, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ebs-csi-controller-7b68c647b4-bjfgd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.793008293Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=PIA, city=Abu Dhabi, country=United Arab Emirates, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.792867372Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.792816319Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager.persist user=691102 slug=deluxeconfdev t=2024-05-29T13:44:14.792613084Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.792580893Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-logs, ref_id=Query" t=2024-05-29T13:44:14.792624434Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Zurich, country=Switzerland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.792672154Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=691102 slug=deluxeconfdev version=1 fingerprint=1eb973cde46652f8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.792486192Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.7922117s EvaluationString:}]" duration=7.978806ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=csi-provisioner, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d628fe2_e5c7_4b13_a130_9ab0aef5a5c0.slice/cri-containerd-108be85454a89b4f0f4b85198eaa556a820bbe7f1c235739d58c34e0c37ebf63.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-provisioner:v4.0.0-eks-1-29-7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=108be85454a89b4f0f4b85198eaa556a820bbe7f1c235739d58c34e0c37ebf63, namespace=kube-system, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ebs-csi-controller-7b68c647b4-bjfgd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.792571038Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-logs, ref_id=Query" t=2024-05-29T13:44:14.79249228Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-logs, ref_id=Query" t=2024-05-29T13:44:14.792474972Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.79245406Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.792370389Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.792241413Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.792097442Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Washington, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.792108271Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.79204806Z caller=remote_instance_store.go:51 user=465668 slug=xpressinfra msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.792015597Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Warsaw, country=Poland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.792012242Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=465668 slug=xpressinfra instance= t=2024-05-29T13:44:14.791981977Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=465668 slug=xpressinfra t=2024-05-29T13:44:14.791954489Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Vientiane, country=Unknown country, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.791686424Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Vienna, country=Austria, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.791467004Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19e529e2_9177_4d4f_a448_2b0611731a69.slice/cri-containerd-d35113fc78677b9a028803a6019b4baa3c35516754066799824506c62582a50b.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d35113fc78677b9a028803a6019b4baa3c35516754066799824506c62582a50b, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ebs-csi-node-dj7bd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.791485885Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=staging, op=putObject" t=2024-05-29T13:44:14.791483856Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=180994 slug=cgmonitor t=2024-05-29T13:44:14.791358157Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.791430812Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.scheduler user=180994 slug=cgmonitor version=1 fingerprint=3a05c298e90d5158 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.791279296Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.790972026s EvaluationString:}]" duration=634.597906ms
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=staging, op=getObject" t=2024-05-29T13:44:14.791303204Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=liveness-probe, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19e529e2_9177_4d4f_a448_2b0611731a69.slice/cri-containerd-aa53edb64cd2b5dacc2c49e802523f502c28fdde109fce14cd99296b6729d2db.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/livenessprobe:v2.12.0-eks-1-29-7, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=aa53edb64cd2b5dacc2c49e802523f502c28fdde109fce14cd99296b6729d2db, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ebs-csi-node-dj7bd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.791264832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Vancouver, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.791258319Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=554491 slug=safeskyindustries version=64 fingerprint=70caae58bf0cbf46 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.791216916Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=westeurope, namespace=production State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=westeurope, namespace=production Value:0xc00fc0e360} C:{Var:C Labels:cluster=westeurope, namespace=production Value:0xc00fc0e200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.792300566s EvaluationString:[ var='A' labels={cluster=westeurope, namespace=production} value=0 ], [ var='C' labels={cluster=westeurope, namespace=production} value=0 ]}]" duration=10.972468ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Valletta, country=Malta, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.79105554Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=coredns, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4fea7d5_62a1_4a75_aeb9_7fc94b4fa9a6.slice/cri-containerd-0158604d6249a3a203603a522d46891417c3e35d83b1a572ac0cf553ffd38e6c.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/coredns:v1.10.1-eksbuild.7, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0158604d6249a3a203603a522d46891417c3e35d83b1a572ac0cf553ffd38e6c, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=coredns-57fcd4dd7c-ngbd6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.791018084Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.791021795Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=coredns, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4fea7d5_62a1_4a75_aeb9_7fc94b4fa9a6.slice/cri-containerd-0158604d6249a3a203603a522d46891417c3e35d83b1a572ac0cf553ffd38e6c.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/coredns:v1.10.1-eksbuild.7, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0158604d6249a3a203603a522d46891417c3e35d83b1a572ac0cf553ffd38e6c, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=coredns-57fcd4dd7c-ngbd6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.791003396Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=248027 slug=mishp t=2024-05-29T13:44:14.790865092Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.126351ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4fea7d5_62a1_4a75_aeb9_7fc94b4fa9a6.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=coredns-57fcd4dd7c-ngbd6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.790863399Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.790839374Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4fea7d5_62a1_4a75_aeb9_7fc94b4fa9a6.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=coredns-57fcd4dd7c-ngbd6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.790848387Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.790829866Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=aws-eks-nodeagent, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9cc31cc1_9bda_4f84_bf62_0c9513979502.slice/cri-containerd-f040cbef36823db3aafaf9a5745c65e43a88e898cf4a3bc27dcbcc6802b78767.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/amazon/aws-network-policy-agent:v1.1.0-eksbuild.1, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f040cbef36823db3aafaf9a5745c65e43a88e898cf4a3bc27dcbcc6802b78767, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=aws-node-gtmct, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.790719705Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Vaduz, country=Liechtenstein, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.790678022Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.790527536Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0"
+ logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.790582897Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:14.790549381Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=emptyAddressRemovalCronJob alert"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ulaanbaatar, country=Mongolia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.790479742Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.790452225Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.790441812Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.790435262Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.79034833Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Travnik, country=Bosnia and Herzegovina, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.790327224Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=aws-load-balancer-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c43ee3b_92ec_46a9_9415_ea0a5a0e16d5.slice/cri-containerd-7e1bacb33a57c8f0f42db32c4276fa82b7983703a2460064ec6479042e269685.scope, image=public.ecr.aws/eks/aws-load-balancer-controller:v2.5.1, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=7e1bacb33a57c8f0f42db32c4276fa82b7983703a2460064ec6479042e269685, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=aws-load-balancer-controller-5df6fbc497-vd25l, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.790294142Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.789976886Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c43ee3b_92ec_46a9_9415_ea0a5a0e16d5.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=aws-load-balancer-controller-5df6fbc497-vd25l, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.790154427Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.789982755Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=exp0, op=getObject" t=2024-05-29T13:44:14.78991727Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.789900692Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=exp0, op=getObject" t=2024-05-29T13:44:14.789885069Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=e5a65306b6950ffe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.789750762Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C0:{Var:C Labels: Value:0xc02df43d40} C1:{Var:C Labels: Value:0xc02df43d50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.789394838s EvaluationString:[ var='C0' metric='Value' labels={} value=13 ], [ var='C1' metric='Value' labels={} value=13 ]}]" duration=25.690147ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Tirana, country=Albania, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.789821015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=exp0, op=deleteObject" t=2024-05-29T13:44:14.789669437Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Tirana, country=Albania, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.789813362Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.789789132Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=efs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62b2baf2_5a41_40e3_a8b7_63701fc44a2b.slice/cri-containerd-97cbf2cafb1dea0074dc93f0b629c93957b541df22097d7557562944fa4b5d47.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-efs-csi-driver:v1.7.6, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=97cbf2cafb1dea0074dc93f0b629c93957b541df22097d7557562944fa4b5d47, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=efs-csi-node-tbxms, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.789705154Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Tehran, country=Iran, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.789640688Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.789526576Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0"
+ level=debug ts=2024-05-29T13:44:14.789513246Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=liveness-probe, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62b2baf2_5a41_40e3_a8b7_63701fc44a2b.slice/cri-containerd-86038bbae019cda8a67e6ed3209f9fdb52b0fc2ac0ccb09e920dae2fac8da549.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/livenessprobe:v2.11.0-eks-1-29-2, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=86038bbae019cda8a67e6ed3209f9fdb52b0fc2ac0ccb09e920dae2fac8da549, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=efs-csi-node-tbxms, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.789568987Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Tbilisi, country=Georgia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.789487309Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=demo1, op=putObject" t=2024-05-29T13:44:14.789452065Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.789463763Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=csi-driver-registrar, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62b2baf2_5a41_40e3_a8b7_63701fc44a2b.slice/cri-containerd-7bb8a98008394a7b6766a7ac74679b9248317f3cd57f192f28d39477cc2a663c.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-node-driver-registrar:v2.9.3-eks-1-29-2, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=7bb8a98008394a7b6766a7ac74679b9248317f3cd57f192f28d39477cc2a663c, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=efs-csi-node-tbxms, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.78945412Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=361282 slug=turing t=2024-05-29T13:44:14.789311509Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.789339484Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0"
+ logger=ngalert.scheduler user=361282 slug=turing version=72 fingerprint=bfee78da06c3d4b5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.789223555Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.788849033s EvaluationString:}]" duration=12.563394ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Tallinn, country=Estonia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.789338319Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Taipei, country=Taiwan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.78914039Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=crt, op=putObject" t=2024-05-29T13:44:14.788837749Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=crt, op=putObject" t=2024-05-29T13:44:14.788809828Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff7326c9_b226_4e2f_aa29_186276e64ac1.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=efs-csi-controller-ccffdd5fb-qrtj7, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.78877631Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff7326c9_b226_4e2f_aa29_186276e64ac1.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=efs-csi-controller-ccffdd5fb-qrtj7, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.788763947Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.788692817Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Strasbourg, country=France, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.788751335Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=ebs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e40c9c5_2454_44f9_8eed_e7190284d6c4.slice/cri-containerd-b8af2af8fa8ab4e47f081de0337eb179abf05367ce5d1c8d8ceb6aaf276aadb0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-ebs-csi-driver:v1.29.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b8af2af8fa8ab4e47f081de0337eb179abf05367ce5d1c8d8ceb6aaf276aadb0, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ebs-csi-node-jr9s9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.788653303Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=ebs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e40c9c5_2454_44f9_8eed_e7190284d6c4.slice/cri-containerd-b8af2af8fa8ab4e47f081de0337eb179abf05367ce5d1c8d8ceb6aaf276aadb0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-ebs-csi-driver:v1.29.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b8af2af8fa8ab4e47f081de0337eb179abf05367ce5d1c8d8ceb6aaf276aadb0, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ebs-csi-node-jr9s9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.788636239Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=node-driver-registrar, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e40c9c5_2454_44f9_8eed_e7190284d6c4.slice/cri-containerd-25f198e8abcc7e833f6b37e570011a6158423c893b03f9e33f923dd098160279.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-node-driver-registrar:v2.10.0-eks-1-29-7, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=25f198e8abcc7e833f6b37e570011a6158423c893b03f9e33f923dd098160279, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ebs-csi-node-jr9s9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.788521159Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=instances-svc, env=crt, op=deleteObject" t=2024-05-29T13:44:14.788361464Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=630397 slug=tatin version=154 fingerprint=41495f9da16bc0f7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.788368182Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.788063812s EvaluationString:}]" duration=20.985841ms
+ level=debug ts=2024-05-29T13:44:14.787423625Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.788153572Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=334408 slug=voltagrid t=2024-05-29T13:44:14.788089565Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.424221ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e5ea808_d8f1_4c8c_9313_1322e39493e5.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=coredns-57fcd4dd7c-fqdxr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.788160263Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=staging, op=getObject" t=2024-05-29T13:44:14.787907749Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee85f6ee_958f_4b26_8ff4_ed59e0a59cfa.slice/cri-containerd-de6ee02befd26338d501d5ad686c2e47e6224922ec291c87469cf0a93a6f5abb.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=de6ee02befd26338d501d5ad686c2e47e6224922ec291c87469cf0a93a6f5abb, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=kube-proxy-jskhq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.787922913Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.78788657Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.787856832Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.787799378Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0"
+ level=debug ts=2024-05-29T13:44:14.78778271Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod105bbba4_dc1f_4c2f_9f39_bd863269e532.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=eks-pod-identity-agent-kc9bz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.787795204Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=staging, op=deleteObject" t=2024-05-29T13:44:14.787704547Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.787656256Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0"
+ level=debug ts=2024-05-29T13:44:14.78762942Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.787615001Z caller=remote_image_capturer.go:54 user=190917 slug=d1cx rule_org_id=1 rule_uid=df63dc18-8826-474a-8b1a-f3cd2346eca3 dashboard=1CJVYJxVk panel=134 msg="rendering alert image with grafana"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=csi-driver-registrar, ele_env=stg, endpoint=https-metrics,
id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb8da663_f003_457c_98b6_3e58c4b59445.slice/cri-containerd-b847aa86e4790fec91b6d3586e8e2152059dd93262513114d35f4b638135e540.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-node-driver-registrar:v2.9.3-eks-1-29-2, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b847aa86e4790fec91b6d3586e8e2152059dd93262513114d35f4b638135e540, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=efs-csi-node-qksw6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.78754749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:14.787486356Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=efs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb8da663_f003_457c_98b6_3e58c4b59445.slice/cri-containerd-a00ab29e24e5229e20315f506809d8e7e5eba00d07913e81a8783fb5c8375ae8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-efs-csi-driver:v1.7.6, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a00ab29e24e5229e20315f506809d8e7e5eba00d07913e81a8783fb5c8375ae8, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=efs-csi-node-qksw6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.787427094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=efs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb8da663_f003_457c_98b6_3e58c4b59445.slice/cri-containerd-a00ab29e24e5229e20315f506809d8e7e5eba00d07913e81a8783fb5c8375ae8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-efs-csi-driver:v1.7.6, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a00ab29e24e5229e20315f506809d8e7e5eba00d07913e81a8783fb5c8375ae8, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=efs-csi-node-qksw6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.787410336Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.787276352Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.787193923Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.787136561Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0" + level=debug ts=2024-05-29T13:44:14.787119741Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=228733 slug=csmoney instance= t=2024-05-29T13:44:14.786990651Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=228733 slug=csmoney t=2024-05-29T13:44:14.786930729Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.78697948Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at : wrong number of args for value: want 1 got 0" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod807ee33d_019c_42de_ae54_6990f1ed4acb.slice/cri-containerd-41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ebs-csi-node-75xzs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.787011127Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Seattle, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.786993055Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Seattle, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.786981524Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=liveness-probe, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod807ee33d_019c_42de_ae54_6990f1ed4acb.slice/cri-containerd-2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/livenessprobe:v2.12.0-eks-1-29-7, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ebs-csi-node-75xzs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.78690053Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod807ee33d_019c_42de_ae54_6990f1ed4acb.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ebs-csi-node-75xzs, 
prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.786827833Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=ebs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72973cc3_d5d8_4c7e_bda6_ae2601c448f9.slice/cri-containerd-e6be292e2947f50da94748ee8402b241757a0a7c3f0cb56fe8bbe3ec7aced4bd.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-ebs-csi-driver:v1.29.1, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e6be292e2947f50da94748ee8402b241757a0a7c3f0cb56fe8bbe3ec7aced4bd, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ebs-csi-controller-7b68c647b4-c7jdx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.786728056Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.786688706Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:14.786617713Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=exp0, op=getObject" t=2024-05-29T13:44:14.786512255Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=csi-provisioner, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72973cc3_d5d8_4c7e_bda6_ae2601c448f9.slice/cri-containerd-85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-provisioner:v4.0.0-eks-1-29-7, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ebs-csi-controller-7b68c647b4-c7jdx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.786457664Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=csi-provisioner, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72973cc3_d5d8_4c7e_bda6_ae2601c448f9.slice/cri-containerd-85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/csi-provisioner:v4.0.0-eks-1-29-7, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874, namespace=kube-system, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ebs-csi-controller-7b68c647b4-c7jdx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.786440787Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Santiago, country=Chile, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.786445382Z level=debug msg="Keeping state" state=Normal + 
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Santiago, country=Chile, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.786437383Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.786410042Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=exp0, op=deleteObject" t=2024-05-29T13:44:14.786269672Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=San Francisco, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.786116852Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=demo1, op=putObject" t=2024-05-29T13:44:14.78606369Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=63699 slug=bizzydist instance= t=2024-05-29T13:44:14.785984556Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=63699 slug=bizzydist t=2024-05-29T13:44:14.785956135Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Riyadh, country=Saudi Arabia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.785769379Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Riga, country=Latvia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.785635216Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.785608595Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Riga, country=Latvia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.785624454Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=334644 slug=meiro t=2024-05-29T13:44:14.785566347Z level=debug msg="Saving alert states done" count=39 max_state_save_concurrency=1 duration=544.976937ms
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=demo1, op=deleteObject" t=2024-05-29T13:44:14.785586905Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=demo1, op=deleteObject" t=2024-05-29T13:44:14.785559725Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.785482569Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.785442794Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at <value>: wrong number of args for value: want 1 got 0"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2cfc7dd2_b948_4c53_8f6f_11c0bb8ded31.slice/cri-containerd-f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=kube-proxy-nhqwd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.7854696Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=crt, op=putObject" t=2024-05-29T13:44:14.785350733Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.785339696Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.785315751Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=eks-pod-identity-agent, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod685df2e1_a1ce_4a28_adb5_88a20686c769.slice/cri-containerd-c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/eks-pod-identity-agent:0.1.6, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=eks-pod-identity-agent-bk84w, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.785284001Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.785297301Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.785178441Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at <value>: wrong number of args for value: want 1 got 0"
+ level=debug ts=2024-05-29T13:44:14.785234652Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.785177066Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.785136619Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Prague, country=Czech Republic, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.785014321Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.784930289Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=efs-plugin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65638cc1_9a7b_4b44_8b59_4c5517aa3b29.slice/cri-containerd-d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/aws-efs-csi-driver:v1.7.6, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=efs-csi-node-njjpv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.784861235Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone instance="container=dataplane-ctrl, env=crt, op=deleteObject" t=2024-05-29T13:44:14.784798637Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.784777609Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phoenix, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.784770005Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65638cc1_9a7b_4b44_8b59_4c5517aa3b29.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=efs-csi-node-njjpv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.784745275Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65638cc1_9a7b_4b44_8b59_4c5517aa3b29.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=efs-csi-node-njjpv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.784730411Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=763376 slug=f5nginxone t=2024-05-29T13:44:14.784659566Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}[{{ $labels.env }}] The S3 client for {{ $labels.container }} has experienced {{value}} errors for the {${labels.op}} operation': error executing template __alert_S3ClientFailure: template: __alert_S3ClientFailure:1:156: executing \"__alert_S3ClientFailure\" at <value>: wrong number of args for value: want 1 got 0"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Phnom Penh, country=Cambodia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.784669489Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Paris, country=France, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.784575226Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.784347574Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.784309186Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.784203803Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbafd11e3_b783_455f_b517_cbe7a2843cb3.slice/cri-containerd-1063b62ef9e0d82400f901d1c005116fcece7b27f27f35e3ad4a96b68b27f816.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=1063b62ef9e0d82400f901d1c005116fcece7b27f27f35e3ad4a96b68b27f816, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=efs-csi-controller-ccffdd5fb-vqr2s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.784199232Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=843304 slug=ppcgroup instance= t=2024-05-29T13:44:14.784111707Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=843304 slug=ppcgroup instance= t=2024-05-29T13:44:14.784098907Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=843304 slug=ppcgroup t=2024-05-29T13:44:14.784057806Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=843304 slug=ppcgroup version=11 fingerprint=d98b7e2cd62f369a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.783970205Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.776319791s EvaluationString:}]" duration=432.761229ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.783975613Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=aws-eks-nodeagent, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92d9ecbe_d48c_457e_ad55_b7b0c218a2c6.slice/cri-containerd-2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/amazon/aws-network-policy-agent:v1.1.0-eksbuild.1, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=aws-node-rrrpz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.78391966Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nicosia, country=Cyprus, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.783808621Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=New York, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.783660534Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92d9ecbe_d48c_457e_ad55_b7b0c218a2c6.slice/cri-containerd-18ed07cab3097f578d42d8295b45c44658d89b9415abe0998ddd57786b107bd6.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=18ed07cab3097f578d42d8295b45c44658d89b9415abe0998ddd57786b107bd6, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=aws-node-rrrpz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.783665887Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.783633121Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.783586415Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92d9ecbe_d48c_457e_ad55_b7b0c218a2c6.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=kube-system, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=aws-node-rrrpz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.783527275Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.783559636Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Naypyidaw, country=Unknown country, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.783492754Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.783511404Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.783413823Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.783407569Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.783327192Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.783285962Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.783262728Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=8732ea52c6611e9c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.78314885Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels:name=filesli A Value:0xc02df43a48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.782749527s EvaluationString:[ var='B0' metric='A' labels={name=filesli A} value=0 ]}]" duration=44.144893ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nairobi, country=Kenya, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.783080873Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.782971482Z caller=remote_instance_store.go:51 user=310637 slug=notino msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.78283933Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Mumbai, country=India, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.782955534Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.782919365Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.782745117Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.782805827Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.782699095Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=480731 slug=brightmove instance="jobName=ProcessInboundEmailActivityJob" t=2024-05-29T13:44:14.78267484Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0969b8be_a24f_4912_9850_9643cab6699b.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=external-secrets, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=external-secrets-cert-controller-6c8bc74d8-swltf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.782644572Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Montreal, country=Canada, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.782628551Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-parking-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-poda4e454a6_caca_43c6_ac93_40758d3a7029.slice/cri-containerd-5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/dev-oneqr-parking-web:d7fd6c61426a5175b77437205555d679fc7df879, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1, namespace=epsandbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-parking-web-6b4766bcd8-zn26j, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.782557932Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Montevideo, country=Unknown country, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.782478913Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.78217712Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=76.802198ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Minsk, country=Belarus, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.782141376Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.782169251Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:14.782167253Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=237629 slug=ocrolus instance= t=2024-05-29T13:44:14.782149209Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.782031252Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=237629 slug=ocrolus version=56 fingerprint=b6ed153389c99367 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.782010258Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.781761666s EvaluationString:}]" duration=90.053385ms
+ logger=ngalert.state.manager.persist user=85469 slug=portinsider t=2024-05-29T13:44:14.782013867Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.792426ms
+ level=debug ts=2024-05-29T13:44:14.781875406Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Miami, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.781755407Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.781659377Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.781667545Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=777618 slug=sousmile instance= t=2024-05-29T13:44:14.781610426Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:59:10Z next_ends_at=2024-05-29T14:04:10Z
+ level=debug ts=2024-05-29T13:44:14.781568033Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.781419466Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.78136692Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.781360123Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.781147321Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.781263999Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.781193778Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.781203776Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.781129363Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=0353591298c8cb33 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.781055966Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.780696289s EvaluationString:}]" duration=16.566541ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Manila, country=Philippines, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.780831147Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Manila, country=Philippines, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.780822844Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Manchester, country=United Kingdom, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.78061235Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:14.780550002Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=127813 slug=clearsale version=5 fingerprint=4a9f994d25183c12 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.780498827Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.780269397s EvaluationString:}]" duration=186.943824ms
+ level=debug ts=2024-05-29T13:44:14.780504109Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.780362355Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.780317459Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.780349272Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d0f3db9_d512_4cd1_829c_4fcabfc3910b.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-scheduler-6fc6f75554-w2pkj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.780260354Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3c77bbf_e08e_424b_83bd_8cb907041f28.slice/cri-containerd-44483304b3be6dc2483db1053802c9765119122a7f37ea389294da27a3700e94.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44483304b3be6dc2483db1053802c9765119122a7f37ea389294da27a3700e94, namespace=epsandbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-gate-cf4d5477d-vcqgs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.780147771Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Luxembourg, country=Luxembourg, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.779982728Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.779779008Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.779804625Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c33a4fe_cec0_4746_a53d_7f13a9f905be.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=proxysql-66775d7cd7-lbs7v, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.779732245Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c33a4fe_cec0_4746_a53d_7f13a9f905be.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=proxysql-66775d7cd7-lbs7v, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.779718048Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.779622737Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.779518417Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.779468409Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod41e9e17a_e955_4547_8a1a_98d7934c4f74.slice/cri-containerd-187e408a2c8e1b8f33432f8480ecccd5696f4e16fbedd3237875202058ac9792.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=187e408a2c8e1b8f33432f8480ecccd5696f4e16fbedd3237875202058ac9792, namespace=epsandbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=oneqr-web-56f78b85b9-n6jfr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.779464932Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88f26df5_6e70_404c_be02_cc04456f34da.slice/cri-containerd-4c5952153f5719ecdb67bdaf612c62d4e939ca62c7543e8adaa36135a62665b9.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=4c5952153f5719ecdb67bdaf612c62d4e939ca62c7543e8adaa36135a62665b9, namespace=epsandbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-gate-archiver-66c5746d6c-hpkx4, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.779208593Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.778905706Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778860686Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778840664Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.778928339Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.77873672Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778712298Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778689297Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778682438Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778650605Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.778772769Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.77853348Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.77852646Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.7785177Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12f2ed71_a229_45ee_8d79_c2d58ae16954.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=external-dns-666f8b6bb5-qxp8p, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.778506803Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.778582137Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778472367Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.778417665Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.778405248Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.778262012Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.778215266Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.778224304Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ level=debug ts=2024-05-29T13:44:14.778022064Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.777905637Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.777882232Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39d31670_6d9b_43d4_8d0e_f45af1b4a6fb.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-charge-api-59579fdbd6-4hqv8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.777835923Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.777751627Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.777573803Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd64c285b_c5c6_4e2d_a8c5_a513b2ac8493.slice/cri-containerd-a7fd692ef3d4ac61949510fc1f7db6f918d2edb7a14a5acbc6fc79f500d420ea.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a7fd692ef3d4ac61949510fc1f7db6f918d2edb7a14a5acbc6fc79f500d420ea, namespace=epsandbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-allvalue-agent-c54dfc8bb-67f2z, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.777589571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd64c285b_c5c6_4e2d_a8c5_a513b2ac8493.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-allvalue-agent-c54dfc8bb-67f2z, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.777441412Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.777387188Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod3d27343d_1042_468f_8509_43f972acc296.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-admin-web-7dc96d79b-7fm8k, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.777285006Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.777269807Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.777236323Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Istanbul, country=Turkey, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.777084337Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.777060221Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:14.777021886Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="FileSystemId=fs-0d27718b508eda2a7" t=2024-05-29T13:44:14.77700288Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.776929697Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.45937ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podabefc517_45cd_4f8c_ab44_85e9ff86bbc6.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c48d4f58b-jfn2p, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.776893603Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod532780c8_be1f_473d_9c45_441545661f69.slice/cri-containerd-fe8b3b3e28cd69082f7e0dc1194e8f44b25bd18e371c727cd117122c8f865241.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=fe8b3b3e28cd69082f7e0dc1194e8f44b25bd18e371c727cd117122c8f865241, namespace=epsandbox, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=oneqr-connect-api-746577bc86-s57bz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.776765077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.776726867Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hong Kong, country=Hong Kong, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.776741486Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.776697683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.776661062Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:14.776637612Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Helsinki, country=Finland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.776571594Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69a9e29d_9470_4943_9061_a8beca89683f.slice/cri-containerd-7e8596ba69f68130f501eca29c65fa9e0aa1aee313f34575520e657c45637415.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=7e8596ba69f68130f501eca29c65fa9e0aa1aee313f34575520e657c45637415, namespace=epsandbox, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-config-api-645c6b9f57-7stwc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.776476057Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.776371512Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Hanoi, country=Vietnam, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.776361422Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.776330043Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.776214839Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb8d1d0d_f6ff_4e21_8211_c5252cb5b9dd.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=ele-gate-log-5449fdb4c8-xp8cz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.776324646Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.776229311Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.776071162Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod16789e41_5fce_4be5_8bc2_ef04bc39dc79.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-tablet-web-84b5cb9b8f-j9shf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.776179224Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.776131672Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.775969053Z caller=remote_image_capturer.go:54 user=172772 slug=ppbtradingtribe rule_org_id=1 rule_uid=2rhIWga7k dashboard=95i4WR-7k panel=2 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:14.775825296Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ebaebdc_628d_40b7_8caa_71788283a7f8.slice/cri-containerd-6880438a24012dec80bb1d10c3f90167933bf76f8864cad585b2927911c9e2c0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6880438a24012dec80bb1d10c3f90167933bf76f8864cad585b2927911c9e2c0, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-svc-64d544f847-22jjq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775903663Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ebaebdc_628d_40b7_8caa_71788283a7f8.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-svc-64d544f847-22jjq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775764928Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod2ec44e92_861b_4945_87d6_5e13075d1155.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-kds-web-58cbc99896-5mdth, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775646119Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.775596634Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.775620513Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.775569834Z caller=remote_instance_store.go:51 user=698103 slug=vericast msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.775501343Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd31b47fb_76a6_490a_8a38_fb5cc696ab3a.slice/cri-containerd-79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-id-645898965-jsjhn, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775516376Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dublin, country=Ireland, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.775458495Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.775389337Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73f0efa1_c4a2_4179_acde_e265be01acfc.slice/cri-containerd-8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-user-api-5bb4f9fb5b-l4gr5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775412176Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73f0efa1_c4a2_4179_acde_e265be01acfc.slice/cri-containerd-8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-user-api-5bb4f9fb5b-l4gr5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775375861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73f0efa1_c4a2_4179_acde_e265be01acfc.slice/cri-containerd-2d388a7ad207086ea5bb357992e701113909dc4e6c1502ab58444c9d4df6aadc.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2d388a7ad207086ea5bb357992e701113909dc4e6c1502ab58444c9d4df6aadc, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-user-api-5bb4f9fb5b-l4gr5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775240938Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Dubai, country=United Arab Emirates, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.775221759Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.775046687Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod58bd45fa_1d0a_4953_8621_a43177477f20.slice/cri-containerd-6df4da8307e0167afc2e4732a66e69d303d03efb80e7683f93f8acc822d16972.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6df4da8307e0167afc2e4732a66e69d303d03efb80e7683f93f8acc822d16972, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-sys-v2-6cfd47c6dd-tqhrx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.775093074Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Douglas, country=Isle of Man, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.775010325Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.77497357Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-sys-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod58bd45fa_1d0a_4953_8621_a43177477f20.slice/cri-containerd-055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/dev-elepay-sys-v2:12c37ecf9a4552ba249b561a43f8e0b1d73aa86d, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-sys-v2-6cfd47c6dd-tqhrx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.774930981Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Doha, country=Qatar, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.774840511Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.774772628Z caller=remote_instance_store.go:51 user=248027 slug=mishp 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.774800296Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.77465926Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=248027 slug=mishp instance= t=2024-05-29T13:44:14.774712453Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.774653114Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.774548449Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="serverless_id=int-eu-west-1, statefulset=proxy" t=2024-05-29T13:44:14.774573166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.774477239Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod920ac0a9_f05c_4792_8c6e_d95045089d2c.slice/cri-containerd-2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-admin-api-6849d9fbd9-txnmg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.774348198Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.774161468Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.774104393Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod920ac0a9_f05c_4792_8c6e_d95045089d2c.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-admin-api-6849d9fbd9-txnmg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.77420013Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.774077526Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Colombo, country=Sri Lanka, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.774088444Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, 
city=Colombo, country=Sri Lanka, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.77407728Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.774050392Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76837815_b357_4ce2_92c1_9a4472a66686.slice/cri-containerd-e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-send-v2-757b678456-h676p, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.774058934Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76837815_b357_4ce2_92c1_9a4472a66686.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-send-v2-757b678456-h676p, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.773920902Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.773870167Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.773805437Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Chicago, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.773728672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod840f2fad_8986_478f_b7db_d34e0c79db28.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=epsandbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-queue-6867d7f6cb-t4htj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.773653172Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.77360263Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.773523634Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Caracas, country=Venezuela, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.773590688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Caracas, country=Venezuela, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.773581658Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56b028e1_a001_4e8e_a829_2c8bf2c73dab.slice/cri-containerd-951c9acaf2d8b70e4c391bc14733de29634271fbb3c34a248efa2473cd43867a.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=951c9acaf2d8b70e4c391bc14733de29634271fbb3c34a248efa2473cd43867a, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=user-api-76ddd5b9d-rkszv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.773537028Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56b028e1_a001_4e8e_a829_2c8bf2c73dab.slice/cri-containerd-951c9acaf2d8b70e4c391bc14733de29634271fbb3c34a248efa2473cd43867a.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=951c9acaf2d8b70e4c391bc14733de29634271fbb3c34a248efa2473cd43867a, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=user-api-76ddd5b9d-rkszv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.773520964Z level=debug msg="Setting next 
state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.773456203Z caller=remote_instance_store.go:51 user=109928 slug=deadhappy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=174054 slug=netrading instance="app=pointconnect-scraper, azure_workload_identity_use=true, cluster=PROD, container=pointconnect-scraper, criticality=critical, datatype=pointconnect-sqs, env=prod, imageversion=2024.05.08-10.17.55-305024c7-dockerfile-7f739c23, instance=pointconnect-scraper-697f658f46-fmjhr:pointconnect-scraper:http-metrics, job=netdataplatform-fundamentals-scrapers/pointconnect-scraper, name=pointconnect-scraper, namespace=netdataplatform-fundamentals-scrapers, owningteam=dataoperations, pod=pointconnect-scraper-697f658f46-fmjhr, pod_template_hash=697f658f46, rabbitmq=0" t=2024-05-29T13:44:14.77345744Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=174054 slug=netrading t=2024-05-29T13:44:14.773391197Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Cairo, country=Egypt, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.773418968Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=109928 slug=deadhappy instance="ApiName=deadhappy-api-prd" t=2024-05-29T13:44:14.773370041Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.773309919Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.773270794Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Buenos Aires, country=Argentina, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.773258769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod16690832_6daf_40f6_a7eb_946ac3f188d6.slice/cri-containerd-ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-qgpq5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.773208539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod16690832_6daf_40f6_a7eb_946ac3f188d6.slice/cri-containerd-ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-qgpq5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, 
service=kubelet" t=2024-05-29T13:44:14.773190694Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.773057315Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.773089418Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.77299579Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=pubsub, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod16690832_6daf_40f6_a7eb_946ac3f188d6.slice/cri-containerd-c9242427e30cd75cd1a200f9412d87e0e9c2daf7db2b64c4e5cbde4ee71289d1.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-pubsub:5725dcb8719960097ada1114d15fede23bd39b1b, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c9242427e30cd75cd1a200f9412d87e0e9c2daf7db2b64c4e5cbde4ee71289d1, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-qgpq5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.773060747Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.77281143Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.772736654Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=334408 slug=voltagrid t=2024-05-29T13:44:14.77266109Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.772547406Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=334408 slug=voltagrid t=2024-05-29T13:44:14.772591355Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.772568258Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba04b8f9_3331_4636_8d0d_2c81cc0ffc68.slice/cri-containerd-c0ec461407113837d6b85b193af5e1a541edcd714bdce55d172b34bce30264ac.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c0ec461407113837d6b85b193af5e1a541edcd714bdce55d172b34bce30264ac, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-charge-api-74775dd5f9-627cs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.772473342Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba04b8f9_3331_4636_8d0d_2c81cc0ffc68.slice/cri-containerd-c0ec461407113837d6b85b193af5e1a541edcd714bdce55d172b34bce30264ac.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
name=c0ec461407113837d6b85b193af5e1a541edcd714bdce55d172b34bce30264ac, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-charge-api-74775dd5f9-627cs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.772459879Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.772408583Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.772318168Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-charge-api, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba04b8f9_3331_4636_8d0d_2c81cc0ffc68.slice/cri-containerd-bdb100bfd73f508b00d1b4f39e95408c650088ce3e0b0f3146856fbe5f81c51c.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-charge-api:8b00e7a784fcdbb71425f200edc2593d0735ff02, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bdb100bfd73f508b00d1b4f39e95408c650088ce3e0b0f3146856fbe5f81c51c, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-charge-api-74775dd5f9-627cs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.772322179Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.772230517Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.772099066Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba04b8f9_3331_4636_8d0d_2c81cc0ffc68.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-charge-api-74775dd5f9-627cs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.772060625Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:14.771919789Z level=debug msg="Saving alert states done" count=57 max_state_save_concurrency=1 duration=949.311986ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.77193203Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.771903415Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + level=debug ts=2024-05-29T13:44:14.771740016Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.771763334Z caller=remote_instance_store.go:51 user=235691 slug=om2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod914f5b20_8a48_489f_90c5_748999258fbc.slice/cri-containerd-58d606663a43b53dc07b2f127c6d7751506087e7b4dd9ef9b574a41fb7388c96.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=58d606663a43b53dc07b2f127c6d7751506087e7b4dd9ef9b574a41fb7388c96, namespace=elepay-api, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-queue-b67c7cd9b-bjjwc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.771648318Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod208866d5_a447_4708_adad_9b663de43695.slice/cri-containerd-c82448dabc8d39bc5faacd363fba1e7345108f1884ade7d0da5317f3814fc0d8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c82448dabc8d39bc5faacd363fba1e7345108f1884ade7d0da5317f3814fc0d8, namespace=elepay-api, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-8rzq8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.771533957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod208866d5_a447_4708_adad_9b663de43695.slice/cri-containerd-c82448dabc8d39bc5faacd363fba1e7345108f1884ade7d0da5317f3814fc0d8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c82448dabc8d39bc5faacd363fba1e7345108f1884ade7d0da5317f3814fc0d8, namespace=elepay-api, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-8rzq8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.771515786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.771430545Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod208866d5_a447_4708_adad_9b663de43695.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-8rzq8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.771355209Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn 
t=2024-05-29T13:44:14.771223612Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-charge-api, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15107631_546b_46c9_8772_0fd3c0076980.slice/cri-containerd-e985c12b662f9992dce277a01d1dcfcaa8b63a648b976299c786e14c0eb0ba48.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-charge-api:65c677935755c021ea9361077bcdd74f37a0ece7, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e985c12b662f9992dce277a01d1dcfcaa8b63a648b976299c786e14c0eb0ba48, namespace=elepay-api, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=elepay-charge-api-689c894b8d-kpcr5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.771217301Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.771189811Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.771092356Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15107631_546b_46c9_8772_0fd3c0076980.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=elepay-charge-api-689c894b8d-kpcr5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.771100799Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam" t=2024-05-29T13:44:14.771043518Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.770983959Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-config-api, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod882dd65b_000c_4c2e_9014_60a55742d759.slice/cri-containerd-0929dfbf769dd8e1fe5dcd1543e2ad70f4b279e2200ff2a4ba08b2a002b03bf2.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-config-api:fbfe4b6414fee98711cb8d58fe7848e71b6bf33d, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0929dfbf769dd8e1fe5dcd1543e2ad70f4b279e2200ff2a4ba08b2a002b03bf2, namespace=elepay-api, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=elepay-config-api-58bbd7b8df-wcxm2, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.770716199Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod882dd65b_000c_4c2e_9014_60a55742d759.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
namespace=elepay-api, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=elepay-config-api-58bbd7b8df-wcxm2, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.770571374Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.770512077Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.770489417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=245291 slug=pismo version=5 fingerprint=010cca1b312aa906 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.770388476Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.770106349s EvaluationString:}]" duration=184.786488ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-charge-api, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20614f10_a965_4158_b3dc_3f58b12f1f95.slice/cri-containerd-9d86d6d686dc82bbd2eb82a98c0e118a2c0f2c38f00842c80d6e36716db28a70.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-charge-api:8b00e7a784fcdbb71425f200edc2593d0735ff02, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9d86d6d686dc82bbd2eb82a98c0e118a2c0f2c38f00842c80d6e36716db28a70, namespace=elepay-api, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=elepay-charge-api-74775dd5f9-4drnv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.770426113Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.770392966Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=174675 slug=journalprod instance="datasource_uid=uF2hBHyGz, ref_id=A" t=2024-05-29T13:44:14.770346993Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=329082 slug=pluralpolicy instance= t=2024-05-29T13:44:14.770338546Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.scheduler user=235691 slug=om2 version=27 fingerprint=7af3295a799430fe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.77023626Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.769906321s EvaluationString:}]" duration=173.898966ms + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.770209655Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service_name=zam" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20614f10_a965_4158_b3dc_3f58b12f1f95.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=elepay-charge-api-74775dd5f9-4drnv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.770238649Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:14.769734527Z level=debug msg="State manager processing evaluation results" resultCount=293 + logger=ngalert.scheduler user=260796 slug=expressvpn version=42 fingerprint=9457eba42cbe6a6f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.750897883Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc02075cc08} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc02075ce20} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, role=vpn Value:0xc02075cc60} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, role=vpn Value:0xc02075ce78} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc02075ccf0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, role=vpn Value:0xc02075cee0} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc02075cf70} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc02075cb78} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam Value:0xc02075cd80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702490959s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=1000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, 
environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Algiers, country=Algeria, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc02075d2b0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc02075d0b0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, role=vpn Value:0xc02075d110} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, role=vpn Value:0xc02075d218} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc02075d400} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, role=vpn Value:0xc02075d500} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc02075d358} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc02075d1a8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam Value:0xc02075d4a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702550952s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=12500 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=12500 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=1 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, role=vpn} value=0 ], [ var='G' 
labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Amsterdam, country=Netherlands, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02075d6f0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02075d780} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, role=vpn Value:0xc02075da38} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, role=vpn Value:0xc02075d5e8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02075d820} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, role=vpn Value:0xc02075d650} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02075d9d0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02075d8b0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam Value:0xc02075d940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702591719s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, 
country=Andorra, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Andorra, country=Andorra, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02075dbe8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02075dd58} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, role=vpn Value:0xc02075ddb8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, role=vpn Value:0xc02075df08} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02075de60} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, role=vpn Value:0xc02075deb8} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02075dcc0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02075db80} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam Value:0xc02075df98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702617474s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=800 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=800 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Astana, country=Kazakhstan, environment=production, role=vpn, service_name=zam} value=0 ]} 
{Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc07ced5dc0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc01dba20d0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Athens, country=Greece, role=vpn Value:0xc07ced55e8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Athens, country=Greece, role=vpn Value:0xc07ced4a10} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc07ced5030} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Athens, country=Greece, role=vpn Value:0xc07ced47d0} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc07ced4f30} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc07ced5430} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam Value:0xc07ced5af8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702643016s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=1500 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=1500 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Athens, country=Greece, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Athens, country=Greece, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Athens, country=Greece, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Athens, country=Greece, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, 
environment=production, role=vpn, service_name=zam Value:0xc01dba2308} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc01dba28c0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, role=vpn Value:0xc01dba2ab8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, role=vpn Value:0xc01dba2260} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc01dba2568} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, role=vpn Value:0xc01dba2630} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc01dba24c0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc01dba2808} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam Value:0xc01dba29b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702668751s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=5600 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=5600 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Atlanta, country=United States, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc01dba31c0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, 
environment=production, role=vpn, service_name=zam Value:0xc01dba32c0} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, role=vpn Value:0xc01dba3380} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, role=vpn Value:0xc01dba3410} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc01dba3518} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, role=vpn Value:0xc01dba35a8} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc01dba3910} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc01dba36b0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam Value:0xc01dba37d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.70269412s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Auckland, country=New Zealand, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc01dba3ca0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc01dba3d60} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, role=vpn Value:0xc0183401f0} D:{Var:D Labels:access_group=premium, 
brand=CyberGhost, city=Bangkok, country=Thailand, role=vpn Value:0xc0183405d8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc01dba3e30} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, role=vpn Value:0xc01dba3f88} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc018341158} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc018340d38} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam Value:0xc018341890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702713537s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bangkok, country=Thailand, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc018341fa8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc00b74aa10} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, role=vpn Value:0xc00b57a020} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, role=vpn Value:0xc00b74a670} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc00b57ad20} F:{Var:F 
Labels:access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, role=vpn Value:0xc00b74a160} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc00b74a4f8} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc00b57a4b0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam Value:0xc00b74ad70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702735477s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=3200 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=3200 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Barcelona, country=Spain, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc00b74b340} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc00b74bc40} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, role=vpn Value:0xc00b74bda8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, role=vpn Value:0xc02dcee030} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc00b74bfd8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, role=vpn Value:0xc00b74b1a0} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc00b74b880} H:{Var:H 
Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc00b74ba00} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam Value:0xc00b74b630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702759133s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Belgrade, country=Serbia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc02dcee4d0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc02dcee210} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, role=vpn Value:0xc02dcee5d0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, role=vpn Value:0xc02dcee310} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc02dcee668} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, role=vpn Value:0xc02dcee370} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc02dcee2a8} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc02dcee700} I:{Var:I Labels:__name__=city:max_users_enabled, 
access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc02dcee3f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702781565s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=4500 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=4500 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berkshire, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02dcee960} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02dceea90} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, role=vpn Value:0xc02dcee800} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, role=vpn Value:0xc02dceeb40} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02dceebf0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, role=vpn Value:0xc02dceeae8} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02dcee8d0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02dceec80} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam Value:0xc02dceed10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702803182s 
EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam} value=9480 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam} value=9480 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam} value=1 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Berlin, country=Germany, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam Value:0xc02dcef070} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam Value:0xc02dceee98} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, role=vpn Value:0xc02dcef1b8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, role=vpn Value:0xc02dcef2b0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam Value:0xc02dcef380} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, role=vpn Value:0xc02dcef418} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam Value:0xc02dceefc0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam Value:0xc02dceef30} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam Value:0xc02dcef248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.70282218s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam} value=1720 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, 
country=Colombia, environment=production, role=vpn, service_name=zam} value=1720 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bogota, country=Colombia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam Value:0xc02dcef6c0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam Value:0xc02dcef9b8} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, role=vpn Value:0xc02dcef530} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, role=vpn Value:0xc02dcef590} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam Value:0xc02dcefa70} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, role=vpn Value:0xc02dcef720} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam Value:0xc02dcef7b0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam Value:0xc02dcef628} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam Value:0xc02dcef900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702875645s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam} value=4000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam} value=4000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, 
city=Bratislava, country=Slovakia, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bratislava, country=Slovakia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam Value:0xc02dcefb90} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam Value:0xc02dcefd18} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, role=vpn Value:0xc02dcefbf0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, role=vpn Value:0xc02dcefe00} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam Value:0xc02dcefe90} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, role=vpn Value:0xc02dceff80} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam Value:0xc02dcefc88} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam Value:0xc02dceff28} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam Value:0xc02dcefda8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702905186s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam} value=1120 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam} value=0 ], [ 
var='F' labels={access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brisbane, country=Australia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam Value:0xc0474061a8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam Value:0xc008a82710} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, role=vpn Value:0xc047406220} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, role=vpn Value:0xc0474060c0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam Value:0xc008a82890} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, role=vpn Value:0xc047406118} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam Value:0xc047406060} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam Value:0xc008a82920} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam Value:0xc008a83710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702940567s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam} value=13200 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam} value=13200 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam} value=1 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam} 
value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Brussels, country=Belgium, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc047406520} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc047406360} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, role=vpn Value:0xc0474066d0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, role=vpn Value:0xc047406790} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc047406820} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, role=vpn Value:0xc047406730} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc047406490} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc047406670} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam Value:0xc0474065d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.70296711s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=5040 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=5040 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, 
brand=CyberGhost, city=Bucharest, country=Romania, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam Value:0xc047406d38} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam Value:0xc047406940} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, role=vpn Value:0xc047406998} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, role=vpn Value:0xc047406a10} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam Value:0xc047406bc0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, role=vpn Value:0xc047406c18} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam Value:0xc047406aa0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam Value:0xc047406ca8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam Value:0xc047406b30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702987235s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam} value=800 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam} value=800 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Budapest, country=Hungary, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Buenos Aires, country=Argentina, environment=production, role=vpn, service_name=zam 
The evaluation output continues with one record per city instance, each with the same shape: `State:Normal`, no error, `EvaluatedAt:2024-05-29 13:44:10 +0000 UTC`, an evaluation duration of roughly 4.70 s, and nine query variables `A`–`I`. In every record, `A` and `B` carry the configured `city:max_users_enabled` value, `C` and `D` are 1, and `E` through `I` are 0, with a single exception: the Frankfurt instance reports `E`, `G`, and `H` as 1. All instances share the labels `access_group=premium, brand=CyberGhost, environment=production, role=vpn, service_name=zam`. The first record in this span (Buenos Aires) begins before the excerpt and the last (Jakarta) is cut off mid-entry; the recoverable per-city values are:

| City | Country | `city:max_users_enabled` (vars A/B) |
| --- | --- | --- |
| Buenos Aires | Argentina | 1800 |
| Cairo | Egypt | 700 |
| Caracas | Venezuela | 280 |
| Chicago | United States | 3000 |
| Chisinau | Moldova | 560 |
| Colombo | Sri Lanka | 1520 |
| Copenhagen | Denmark | 2240 |
| Dallas | United States | 3000 |
| Dhaka | Bangladesh | 2240 |
| Doha | Qatar | 280 |
| Douglas | Isle of Man | 560 |
| Dubai | United Arab Emirates | 400 |
| Dublin | Ireland | 4520 |
| Dusseldorf | Germany | 350 |
| Düsseldorf | Germany | 350 |
| Frankfurt | Germany | 35120 |
| Guatemala City | Unknown country | 1200 |
| Hanoi | Vietnam | 2200 |
| Helsinki | Finland | 2240 |
| Hong Kong | Hong Kong | 2520 |
| Huenenberg | Switzerland | 6000 |
| Istanbul | Turkey | 1000 |
| Jakarta | Indonesia | 2360 |
country=Indonesia, environment=production, role=vpn, service_name=zam} value=2360 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jakarta, country=Indonesia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc06f232bd8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc06f233380} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, role=vpn Value:0xc06f233440} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, role=vpn Value:0xc06f232cf0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc06f232968} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, role=vpn Value:0xc06f2329c0} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc06f232d80} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc06f232a50} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam Value:0xc06f232fe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703621657s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, 
role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Jerusalem, country=Israel, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc013c7a438} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc013c7a218} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, role=vpn Value:0xc013c7a130} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, role=vpn Value:0xc013c7a010} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc06f233a70} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, role=vpn Value:0xc013c7a0a8} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc013c7a2b8} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc013c7a3b0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam Value:0xc06f233d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703653685s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=2000 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=2000 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, 
environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Johannesburg, country=South Africa, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc013c7aa60} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc013c7ab18} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, role=vpn Value:0xc013c7a7b0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, role=vpn Value:0xc013c7a8d0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc013c7a860} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, role=vpn Value:0xc013c7a668} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc013c7a9a0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc013c7a728} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam Value:0xc013c7a600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.70369234s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, 
brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Karachi, country=Pakistan, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc013c7b150} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc013c7b570} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, role=vpn Value:0xc013c7b1d0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, role=vpn Value:0xc013c7b250} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc013c7b080} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, role=vpn Value:0xc013c7b2c0} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc013c7ae28} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc013c7b358} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc013c7afb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703726538s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam} value=1200 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam} value=1200 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' 
labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kathmandu, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc013c7b6c8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc013c7bb10} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, role=vpn Value:0xc013c7b760} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, role=vpn Value:0xc013c7b860} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc013c7b910} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, role=vpn Value:0xc013c7b7d0} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc013c7bbc0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc013c7bc60} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam Value:0xc013c7ba58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703759566s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=1780 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=1780 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' 
labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kuala Lumpur, country=Malaysia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam Value:0xc013c7bdc8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam Value:0xc02adc0228} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, role=vpn Value:0xc02adc02a8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, role=vpn Value:0xc013c7bfa0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam Value:0xc02adc0048} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, role=vpn Value:0xc02adc0120} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam Value:0xc02adc01b0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam Value:0xc013c7be80} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam Value:0xc013c7bf50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703795839s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam} value=1300 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam} value=1300 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Kyiv, country=Ukraine, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam State:Normal Error: 
Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc02adc0780} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc02adc0598} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, role=vpn Value:0xc02adc05f0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, role=vpn Value:0xc02adc03a0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc02adc0460} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, role=vpn Value:0xc02adc0658} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc02adc0500} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc02adc06e0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam Value:0xc02adc0890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703831549s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lagos, country=Nigeria, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc0b90} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown 
country, environment=production, role=vpn, service_name=zam Value:0xc02adc0a00} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, role=vpn Value:0xc02adc0dc8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, role=vpn Value:0xc02adc0a58} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc0c40} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, role=vpn Value:0xc02adc0cb0} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc0b00} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc0d68} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc0e78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703881994s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam} value=1200 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam} value=1200 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lapaz, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02adc1320} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02adc0fc8} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, role=vpn 
Value:0xc02adc1150} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, role=vpn Value:0xc02adc11d0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02adc13d0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, role=vpn Value:0xc02adc1030} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02adc1278} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02adc10d8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam Value:0xc02adc1480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703914099s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Las Vegas, country=United States, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc1aa8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc1958} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, role=vpn Value:0xc02adc16f8} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, role=vpn Value:0xc02adc17c8} E:{Var:E 
Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc19f8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, role=vpn Value:0xc02adc18b8} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc15d0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc1858} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam Value:0xc02adc1688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703942571s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam} value=1200 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam} value=1200 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lima, country=Unknown country, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam Value:0xc02adc1db0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam Value:0xc02adc1c68} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, role=vpn Value:0xc02adc1cc0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, role=vpn Value:0xc02adc1d20} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam Value:0xc02adc1fe8} F:{Var:F Labels:access_group=premium, brand=CyberGhost, 
city=Lisbon, country=Portugal, role=vpn Value:0xc02adc1e20} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam Value:0xc02adc1f58} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam Value:0xc02adc1eb8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam Value:0xc02adc1bd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.70397496s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam} value=2240 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Lisbon, country=Portugal, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam Value:0xc0300d03d8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam Value:0xc0300d0468} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, role=vpn Value:0xc0300d02a0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, role=vpn Value:0xc0300d0600} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam Value:0xc0300d0508} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, role=vpn Value:0xc0300d0328} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam Value:0xc0300d05a8} H:{Var:H 
Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam Value:0xc0300d06b0} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam Value:0xc0300d0760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.70400901s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Ljubljana, country=Slovenia, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc0300d0ad0} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc0300d0e58} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, role=vpn Value:0xc0300d0d40} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, role=vpn Value:0xc0300d0ba0} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc0300d08c0} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, role=vpn Value:0xc0300d0c20} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc0300d0cb8} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc0300d09f8} I:{Var:I 
Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam Value:0xc0300d0980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.704044723s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=14800 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=14800 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=1 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=1 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=1 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=London, country=United Kingdom, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam Value:0xc0300d1000} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam Value:0xc0300d1658} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, role=vpn Value:0xc0300d13d0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, role=vpn Value:0xc0300d1078} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam Value:0xc0300d1120} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, role=vpn Value:0xc0300d1260} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam Value:0xc0300d14e0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, service_name=zam Value:0xc0300d11b8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Los Angeles, country=United States, environment=production, role=vpn, 
[Truncated Grafana alert-state evaluation log (raw dump cut at both ends). Every record in this span reports `State:Normal` for the recording rule `city:max_users_enabled`, evaluated at 2024-05-29 13:44:10 UTC, with shared labels `access_group=premium, brand=CyberGhost, environment=production, role=vpn, service_name=zam`. The recoverable per-city values (vars A and B are always equal) are:

| City | Country | max_users_enabled |
|------|---------|-------------------|
| Los Angeles | United States | 6720 |
| Luxembourg | Luxembourg | 2000 |
| Macau | Macao | 400 |
| Madrid | Spain | 3360 |
| Manchester | United Kingdom | 7700 |
| Manila | Philippines | 1920 |
| Marseille | France | 4400 |
| Melbourne | Australia | 2600 |
| Mexico City | Mexico | 1800 |
| Miami | United States | 5300 |
| Milano | Italy | 2000 |
| Minsk | Belarus | 1500 |
| Monaco | Monaco | 560 |
| Montevideo | Unknown country | 1200 |
| Montreal | Canada | 11700 |
| Moscow | Russian Federation | 4000 |
| Mumbai | India | 2000 |
| Nairobi | Kenya | 280 |
| Nassau | Bahamas | 1200 |
| Naypyidaw | Unknown country | 1200 |
| New York | United States | 11240 |
| Nicosia | Cyprus | 400 |
| NoSpy Bucharest | Romania | 1000 |
| Nuuk | Greenland | (record truncated) |

Most instances report vars C and D as 1 and vars E through I as 0. Exceptions: Melbourne reports C and D as 0 with F, G, and H as 1; Montreal and New York additionally report E, G, and H as 1.]
Value:0xc0658514f8} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc065851988} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, role=vpn Value:0xc065851a88} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, role=vpn Value:0xc0658518d8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc065851880} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, role=vpn Value:0xc065851af8} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc065851ba0} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc0658516b8} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam Value:0xc065851798}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.704606445s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=280 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=280 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Nuuk, country=Greenland, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc046dd2110} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc065851d40} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, role=vpn Value:0xc065851da0} D:{Var:D 
Labels:access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, role=vpn Value:0xc065851ca8} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc065851e40} F:{Var:F Labels:access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, role=vpn Value:0xc046dd2018} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc046dd2208} H:{Var:H Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc065851f18} I:{Var:I Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam Value:0xc065851fa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.704626763s EvaluationString:[ var='A' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=800 ], [ var='B' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=800 ], [ var='C' labels={access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, role=vpn} value=1 ], [ var='D' labels={access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, role=vpn} value=1 ], [ var='E' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=0 ], [ var='F' labels={access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, role=vpn} value=0 ], [ var='G' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=0 ], [ var='H' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=0 ], [ var='I' labels={__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Oslo, country=Norway, environment=production, role=vpn, service_name=zam} value=0 ]} {Instance:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc046dd2710} B:{Var:B Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc046dd2540} C:{Var:C Labels:access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, role=vpn Value:0xc046dd27b0} D:{Var:D Labels:access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, role=vpn Value:0xc046dd2a50} E:{Var:E Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0xc046dd2880} F:{Var:F Labels:access_group=premium, brand=CyberGhost, 
city=Panama City, country=Panama, role=vpn Value:0xc046dd25c8} G:{Var:G Labels:__name__=city:max_users_enabled, access_group=premium, brand=CyberGhost, city=Panama City, country=Panama, environment=production, role=vpn, service_name=zam Value:0 + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod144996e2_a276_4e47_93ec_2c1b65b67a5e.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=elepay-allvalue-agent-69bd65dc9-xrw7v, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.769925617Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.769784717Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.769745244Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.769619448Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae8001d5_cd5c_4b5a_a532_4d62a35444fb.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-gate-archiver-6c4b579bf-w88gk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.769646513Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.769552042Z caller=remote_instance_store.go:51 user=374423 slug=bitburst msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.769585522Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.769567284Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda32e25f3_75d4_4aac_a474_e8db3c6a8586.slice/cri-containerd-44c69e2cd75cff03141a48e3d81faf4dc4dce13df4c3067b3116a80b54351a18.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44c69e2cd75cff03141a48e3d81faf4dc4dce13df4c3067b3116a80b54351a18, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=user-api-76ddd5b9d-xgzw8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.769252365Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.769126288Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda32e25f3_75d4_4aac_a474_e8db3c6a8586.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, 
node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=user-api-76ddd5b9d-xgzw8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.76911775Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.768997498Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=pubsub, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod0d10eb87_2363_47ff_bc69_0889ed783858.slice/cri-containerd-a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-pubsub:5725dcb8719960097ada1114d15fede23bd39b1b, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-kjwt2, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.768968889Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.768872641Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.768786441Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5b3ad23_9062_4acb_8aa4_205d201fcb76.slice/cri-containerd-be6cb1191384c94d811ae2d33f13bd80e65a22bb291dd004b81f04ffdac370db.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=be6cb1191384c94d811ae2d33f13bd80e65a22bb291dd004b81f04ffdac370db, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=oneqr-svc-6859d995bd-7hqrx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.7686928Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.768591431Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/salary-calculator, Stage=--" t=2024-05-29T13:44:14.76844876Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57c1a101_e535_43ad_bc77_65175274ca39.slice/cri-containerd-c182e6322db3b32c54e1544e3e9cb9504b84818151c5d2d1ada4a247f1d45a89.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c182e6322db3b32c54e1544e3e9cb9504b84818151c5d2d1ada4a247f1d45a89, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-sys-api-6bd7ffd465-b7vpr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.768545873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:14.768370747Z 
level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d46345b_4bb7_4d46_925c_6e2a54b3809c.slice/cri-containerd-6b1247322aa769e5e8bc2bba8e03f524dd9c0e39c94c25de550d0109ef8d55d3.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6b1247322aa769e5e8bc2bba8e03f524dd9c0e39c94c25de550d0109ef8d55d3, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-charge-api-689c894b8d-wz7ng, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.768012724Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3d46345b_4bb7_4d46_925c_6e2a54b3809c.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-charge-api-689c894b8d-wz7ng, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.767891807Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa3276bd_75af_4ca1_837a_68cee6807936.slice/cri-containerd-b6653e781c8661282ca64a8748074537546b9e5ed221ecfa05f60304f72203f2.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b6653e781c8661282ca64a8748074537546b9e5ed221ecfa05f60304f72203f2, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-business-svc-54797d9984-l7xcw, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.767756075Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded79ceb6_7c32_439b_a7ec_bfcfb30ba5a6.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-track-8565d75886-2zq8q, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.767512592Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.767433378Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.767314881Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.767269787Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3c0f575_6812_40a7_be77_f81ea2245fcd.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, 
node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=qrgw-management-api-5b585ff6fb-6n8f5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.767258867Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.767164213Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod1b82ebec_dd5a_40bf_9b9d_fe7a41613c0f.slice/cri-containerd-366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1, namespace=elepay-api, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-45hd8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.767140929Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod1b82ebec_dd5a_40bf_9b9d_fe7a41613c0f.slice/cri-containerd-366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1, namespace=elepay-api, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-45hd8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.767124415Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.767076949Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.767040713Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.767015912Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.767012386Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.766953423Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=pubsub, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod1b82ebec_dd5a_40bf_9b9d_fe7a41613c0f.slice/cri-containerd-2c9e27059d77f66aa9b82a2a575d551a1453e55a74b9716afa3365a82eae66f9.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-pubsub:5725dcb8719960097ada1114d15fede23bd39b1b, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2c9e27059d77f66aa9b82a2a575d551a1453e55a74b9716afa3365a82eae66f9, namespace=elepay-api, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=pubsub-775698cb6b-45hd8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.766984349Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.766979021Z 
caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.766979785Z caller=remote_instance_store.go:51 user=213445 slug=gan msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.766860723Z caller=remote_alert_sender.go:94 user=292865 slug=admin1671 host=admin1671-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.50.8:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=g8zS-Rwnk alerts=1 + logger=ngalert.scheduler user=342039 slug=criblcloud version=6 fingerprint=595d95b74e9ce84b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.766764104Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.766506242s EvaluationString:}]" duration=68.015226ms + logger=ngalert.state.manager.persist user=292865 slug=admin1671 t=2024-05-29T13:44:14.766736046Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.609961ms + level=debug ts=2024-05-29T13:44:14.766670719Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=70430 slug=dapperlabs version=3 fingerprint=7dc5a7cd428ca34a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.766666944Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.766425408s EvaluationString:}]" duration=44.271495ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef042abe_87a4_49d5_92f7_720c2d17ca70.slice/cri-containerd-ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07, namespace=elepay-api, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-api-6bd7ffd465-bkh5g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.766705162Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.766611803Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef042abe_87a4_49d5_92f7_720c2d17ca70.slice/cri-containerd-6f956eddd7778980160d02f4ebbf51042d9ba3bcc2fb4f92915928bdedc95631.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6f956eddd7778980160d02f4ebbf51042d9ba3bcc2fb4f92915928bdedc95631, namespace=elepay-api, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-api-6bd7ffd465-bkh5g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.76660895Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.766514607Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.766369431Z 
caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.766359415Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.766163882Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.766130903Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.40372ms + level=debug ts=2024-05-29T13:44:14.766213388Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda958dbfe_dbc8_4e01_852a_8e23b1e06ab3.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-svc-6859d995bd-c99zk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.766160065Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda958dbfe_dbc8_4e01_852a_8e23b1e06ab3.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-svc-6859d995bd-c99zk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.76614519Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.766078319Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40cab8bd_a13e_4741_a69c_5bd6ed4c0528.slice/cri-containerd-a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-connect-api-777645bf4b-p55c8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.766033017Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.765986672Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40cab8bd_a13e_4741_a69c_5bd6ed4c0528.slice/cri-containerd-a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-connect-api-777645bf4b-p55c8, 
prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.766015073Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40cab8bd_a13e_4741_a69c_5bd6ed4c0528.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=oneqr-connect-api-777645bf4b-p55c8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.765917629Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=310637 slug=notino instance="app_kubernetes_io_name=newsletter" t=2024-05-29T13:44:14.765826538Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.765802203Z caller=remote_instance_store.go:51 user=174016 slug=journalstaging msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=174016 slug=journalstaging instance="datasource_uid=bYQmLgyGz, ref_id=A" t=2024-05-29T13:44:14.765754131Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9838a433_0fc1_4759_bc79_f9f8235a9d2f.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-user-api-7b7b4686f4-2qvc2, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.765673703Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.76555989Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=310637 slug=notino t=2024-05-29T13:44:14.76558729Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.scheduler user=310637 slug=notino version=63 fingerprint=2e410b2d23ae87e5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.765482244Z level=debug msg="Alert rule evaluated" results="[{Instance:app_kubernetes_io_name=navigation-fragment State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:app_kubernetes_io_name=navigation-fragment Value:0xc08c7518d8} B:{Var:B Labels:app_kubernetes_io_name=navigation-fragment Value:0xc08c7518e0} C:{Var:C Labels:app_kubernetes_io_name=navigation-fragment Value:0xc08c7518e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.765078005s EvaluationString:[ var='A' labels={app_kubernetes_io_name=navigation-fragment} value=0.00037626900723048367 ], [ var='B' labels={app_kubernetes_io_name=navigation-fragment} value=0.00037626900723048367 ], [ var='C' labels={app_kubernetes_io_name=navigation-fragment} value=0 ]} {Instance:app_kubernetes_io_name=newsletter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:app_kubernetes_io_name=newsletter Value:0xc08c751910} B:{Var:B Labels:app_kubernetes_io_name=newsletter Value:0xc08c751918} C:{Var:C Labels:app_kubernetes_io_name=newsletter Value:0xc08c751950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.765093254s EvaluationString:[ var='A' labels={app_kubernetes_io_name=newsletter} value=0 ], [ var='B' labels={app_kubernetes_io_name=newsletter} value=0 ], [ 
var='C' labels={app_kubernetes_io_name=newsletter} value=0 ]}]" duration=45.415977ms + level=debug ts=2024-05-29T13:44:14.765548523Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.765444654Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.765124376Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48909e2f_5237_4885_9f96_ab8c6bf3e05d.slice/cri-containerd-353e8d61d1875e48408f8eca488b75595af9245ed777682829cd5c142cd6c12d.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=353e8d61d1875e48408f8eca488b75595af9245ed777682829cd5c142cd6c12d, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-config-api-58bbd7b8df-mfcrz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.765415863Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.765327017Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod329fb0d5_43f4_4c91_ac57_f25751d74ea6.slice/cri-containerd-cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=elepay-admin-api-86d856dcf9-45h22, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.765289683Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.764960799Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.764835222Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=ele-track, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb434f10_9f8f_4cc0_8e71_ee00c462c80a.slice/cri-containerd-b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-track:74afc41c07a696d3b43c18ecb7736b114876ed17, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-track-8565d75886-8xrk9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.764884009Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.764777224Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.764697472Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.075969ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod06c8a23b_2dd1_4434_b7ab_83e9f7d3069b.slice/cri-containerd-df7a2d4ec51095d6eda01faa350220e2e6f2c5dca9acb5aa41aa3975254b46d1.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=df7a2d4ec51095d6eda01faa350220e2e6f2c5dca9acb5aa41aa3975254b46d1, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-health-5ff845d465-86nb7, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.764765303Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfadcf7ee_ce35_413f_9788_4076d50ee183.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay-api, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-gate-log-85fc965bcb-pnr5w, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.76462953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod53442782_907c_46eb_90c6_d59565b4503c.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-web-6d464c885b-s9xjn, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.76433409Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod53442782_907c_46eb_90c6_d59565b4503c.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-web-6d464c885b-s9xjn, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.764295749Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod24c668de_701e_4a95_b120_cdee43baaf4d.slice/cri-containerd-273a4e65e89e02cc43e09667f65d4ad249ed24f227489611fec4deeeb85c2c1c.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=273a4e65e89e02cc43e09667f65d4ad249ed24f227489611fec4deeeb85c2c1c, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-tablet-web-66f778fd76-msxjv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.764003546Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice, instance=10.100.50.226:10250, 
job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.763732645Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.763716905Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=318831 slug=grafanavianet t=2024-05-29T13:44:14.763559337Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.938506ms + level=debug ts=2024-05-29T13:44:14.763552041Z caller=remote_instance_store.go:51 user=537068 slug=bitvavotrading msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.763463728Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.763427059Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-kds-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-kds-web:24766930eeb1d1b9930b37273004c6de759014bb, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.763284761Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.762756271Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.762861086Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.762919476Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.762806859Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, 
prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.76285899Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-business-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-business-web:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.762714285Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-business-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-business-web:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.762698665Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=537068 slug=bitvavotrading version=3 fingerprint=8658b5501438435d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.7624207Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.76179836s EvaluationString:}]" duration=20.817897ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.762290663Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.762277231Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.762211345Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.762151928Z 
level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.762124423Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.762167375Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-admin-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-admin-web:f915d2ee1324500ccc34d13fa4e6e5fa0f2321d1, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.762149304Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.762002316Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=656284 slug=cencosudx version=3 fingerprint=6f83b942941c831e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.762032987Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.761571319s EvaluationString:}]" duration=9.385271ms + level=debug ts=2024-05-29T13:44:14.761857341Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.762013842Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.76199617Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.761915522Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling 
SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.761893017Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.761876968Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.761600408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=chewy-gateway-worker, pod=chewy-gateway-worker-846bfb78d5-rntbx" t=2024-05-29T13:44:14.761288282Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.761150284Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=698963 slug=lemonade version=3 fingerprint=231dbde8e0789ebb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.761066284Z level=debug msg="Alert rule evaluated" results="[{Instance:app=chewy-gateway-worker, pod=chewy-gateway-worker-846bfb78d5-rntbx State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=chewy-gateway-worker, pod=chewy-gateway-worker-846bfb78d5-rntbx Value:0xc0232fd068} THRESHOLD:{Var:THRESHOLD Labels:app=chewy-gateway-worker, pod=chewy-gateway-worker-846bfb78d5-rntbx Value:0xc0232fd220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.759250352s EvaluationString:[ var='QUERY' labels={app=chewy-gateway-worker, pod=chewy-gateway-worker-846bfb78d5-rntbx} value=0 ], [ var='THRESHOLD' labels={app=chewy-gateway-worker, pod=chewy-gateway-worker-846bfb78d5-rntbx} value=0 ]}]" duration=32.695442ms + level=debug ts=2024-05-29T13:44:14.761110975Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod5434cbc5_5792_4398_b291_e61e6a965d6a.slice/cri-containerd-48d269541de8a1e111f94cf7e144b21fc6a3ec151ffeed2b68396f747e2bad4b.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=48d269541de8a1e111f94cf7e144b21fc6a3ec151ffeed2b68396f747e2bad4b, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=easyqr-web-6c686bd948-w766z, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" 
t=2024-05-29T13:44:14.761166798Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-sys-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod0325e49e_a3eb_426e_9ee3_f5b63f25022e.slice/cri-containerd-d3bc119901a572a50a15905fdf9071dc742ed7289c0c077412d32a7efecfcf03.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-sys-web:d125d8ec4acd0f64e6b4156eb8c154c141c91dbb, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d3bc119901a572a50a15905fdf9071dc742ed7289c0c077412d32a7efecfcf03, namespace=elepay, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=elepay-sys-web-688dfd7c74-lz5p5, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.761033275Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-sys-entry, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod2a0fe16b_c836_4d25_924c_a95825b8faf7.slice/cri-containerd-544da02696d458175d5bd2bc2691e857c58dcaaac437454c82cfed04077e1ce7.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-sys-entry:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=544da02696d458175d5bd2bc2691e857c58dcaaac437454c82cfed04077e1ce7, namespace=elepay, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=elepay-sys-entry-6958579d5c-r8l27, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.760772362Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.76061442Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod76509970_1679_4f37_9f51_0103c3304ffa.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=oneqr-web-6d464c885b-qzx66, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.760602053Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:14.760577316Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=472647 slug=planet instance= t=2024-05-29T13:44:14.76056776Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.760536133Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.760466553Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7c2b6de9_d7a9_493e_9072_d616881da9c0.slice/cri-containerd-0c858c75bea5adabffc5c65d29aa2c5bd65297a7a9efbf6f58e181506efae7d9.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0c858c75bea5adabffc5c65d29aa2c5bd65297a7a9efbf6f58e181506efae7d9, namespace=elepay, 
node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=delivery-web-848fcb956d-zv4v4, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.760304993Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5fb7cfb_62c1_41fd_825f_794755e624ef.slice/cri-containerd-79d43eac6a3ac9d72dfacf067e130486a4256ceb4219719bb769f6294726c82d.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=79d43eac6a3ac9d72dfacf067e130486a4256ceb4219719bb769f6294726c82d, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-tvcjf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.760184973Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.760048978Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.759808011Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08a3e7fa_efa8_4976_ac9f_1c2104f56051.slice/cri-containerd-fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-5xcng, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.759646675Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.759586913Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-shop-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08a3e7fa_efa8_4976_ac9f_1c2104f56051.slice/cri-containerd-cae781464148b5d7a02841637e3c170418251bd01f30bf73efd000dbb1daa8b9.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-shop-web:225fa2d402c97fbc06c49395d1afa22a1c7b0f62, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=cae781464148b5d7a02841637e3c170418251bd01f30bf73efd000dbb1daa8b9, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-5xcng, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.75950907Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.759074194Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podbce6a792_0b50_4454_94ab_27e2391a7141.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=oneqr-pos-web-7dccbbb488-c9tpz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.75900408Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.758999405Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.758852464Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-kds-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod69caf257_dbdb_4b49_b0c0_d0a4561e0c3d.slice/cri-containerd-eb420a1a0cbe72b7a490adfb4bb5ea924911ec80986927918a376a279d6241b3.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-kds-web:24766930eeb1d1b9930b37273004c6de759014bb, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=eb420a1a0cbe72b7a490adfb4bb5ea924911ec80986927918a376a279d6241b3, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-sbf4t, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.758830142Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod69caf257_dbdb_4b49_b0c0_d0a4561e0c3d.slice/cri-containerd-470a47a685704d45ea168da45bb26e33b619ae064050f44fe33eea7621a4e711.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=470a47a685704d45ea168da45bb26e33b619ae064050f44fe33eea7621a4e711, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-sbf4t, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.758700922Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.758612266Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=elepay-sys-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2825f886_a70e_451f_9425_0c5928801a22.slice/cri-containerd-fc48a567482b88fd136bc3ebdbfde0952f2d045232355baf587a76c7ea5ec188.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-sys-v2:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=fc48a567482b88fd136bc3ebdbfde0952f2d045232355baf587a76c7ea5ec188, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-5hb5z, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.758552363Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.758522151Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.758499526Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.758472441Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" 
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2825f886_a70e_451f_9425_0c5928801a22.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-5hb5z, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.758403989Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.758367372Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.758340625Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2825f886_a70e_451f_9425_0c5928801a22.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-5hb5z, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.758386335Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.758328878Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.758243953Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.758222694Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.758154838Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod85f11360_3767_4662_90be_0717d5287bf8.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-pdck8, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.757969098Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.757797917Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod48bd39b8_7cd2_4e91_994e_cf131780100a.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-pbwtf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.757809498Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.757362361Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-prom, 
ref_id=A" t=2024-05-29T13:44:14.757334307Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.757322574Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.757278016Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.757270651Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=374423 slug=bitburst t=2024-05-29T13:44:14.757253606Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice/cri-containerd-67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.757350909Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.757252428Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.757196653Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.757125064Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db5f1d1_a396_49c8_b09d_d09a2b43ae54.slice/cri-containerd-f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-tq7dt, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.757047917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-pod022efa11_2f7b_42b2_adbf_ba733e8e2c6c.slice/cri-containerd-168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-send-746548dd8-hd6sr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.756895438Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.756825043Z caller=remote_instance_store.go:51 user=130276 slug=devops8 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod6605ede6_dcbe_4b32_b7da_36f0624df496.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-master-56b547f46c-jmz2m, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.756751593Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=224047 slug=ppbtradingtribeprd t=2024-05-29T13:44:14.756724822Z level=debug msg="Saving alert states" count=10 max_state_save_concurrency=1 + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-selection-result-update" t=2024-05-29T13:44:14.756710748Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-selection-result-update" t=2024-05-29T13:44:14.756695385Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-rule-4-update" t=2024-05-29T13:44:14.756648203Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.756458281Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-dm-state-update" t=2024-05-29T13:44:14.756557113Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-selection-result-update" t=2024-05-29T13:44:14.756515457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-rule-4-update" t=2024-05-29T13:44:14.756480238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-price-update-update" t=2024-05-29T13:44:14.756446041Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer 
Group=rapl-prd, Topic=ie1-rapl-internal-ew-terms-update" t=2024-05-29T13:44:14.756409029Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-ew-terms-update" t=2024-05-29T13:44:14.75639653Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd t=2024-05-29T13:44:14.756316156Z level=debug msg="State manager processing evaluation results" resultCount=10 + logger=ngalert.scheduler user=224047 slug=ppbtradingtribeprd version=10 fingerprint=e40ad891c0f9b8f5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.756125578Z level=debug msg="Alert rule evaluated" results="[{Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-dm-state-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-dm-state-update Value:0xc02a8939a0} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-dm-state-update Value:0xc02a893958}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755439034s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-dm-state-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-dm-state-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-ew-terms-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-ew-terms-update Value:0xc02a893a48} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-ew-terms-update Value:0xc02a893a90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755456386s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-ew-terms-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-ew-terms-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-price-update-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-price-update-update Value:0xc02a893b80} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-price-update-update Value:0xc02a893b28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755463436s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-price-update-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-price-update-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-rule-4-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-rule-4-update Value:0xc02a893c30} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-rule-4-update Value:0xc02a893c78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755471744s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer 
Group=rapl-prd, Topic=ie1-rapl-internal-rule-4-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-rule-4-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-selection-result-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-selection-result-update Value:0xc02a893d18} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-selection-result-update Value:0xc02a893d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755480624s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-selection-result-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie1-rapl-internal-selection-result-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-dm-state-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-dm-state-update Value:0xc02a893e68} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-dm-state-update Value:0xc02a893e10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755487186s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-dm-state-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-dm-state-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-ew-terms-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-ew-terms-update Value:0xc02a893f08} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-ew-terms-update Value:0xc02a893f50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755494686s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-ew-terms-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-ew-terms-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-price-update-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-price-update-update Value:0xc02a893fe8} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-price-update-update Value:0xc012166188}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755501501s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-price-update-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-price-update-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-rule-4-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-rule-4-update Value:0xc012166258} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-rule-4-update 
Value:0xc0121662a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755507494s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-rule-4-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-rule-4-update} value=0 ]} {Instance:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-selection-result-update State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-selection-result-update Value:0xc0121663b8} C:{Var:C Labels:Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-selection-result-update Value:0xc012166350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.755514734s EvaluationString:[ var='B' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-selection-result-update} value=0 ], [ var='C' labels={Cluster Name=GFBKMSK-PRD, Consumer Group=rapl-prd, Topic=ie2-rapl-internal-selection-result-update} value=0 ]}]" duration=150.602567ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.756220932Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.755981329Z caller=remote_instance_store.go:51 user=542095 slug=intelligencefusion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice/cri-containerd-44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.756047885Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.755878624Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.755914285Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.755900501Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.755696042Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.755815506Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.75565423Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=314067 slug=itsme t=2024-05-29T13:44:14.755709207Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.606894ms + level=debug ts=2024-05-29T13:44:14.755647542Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=87bc560c4d59aa10 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.755385488Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=LIMA Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc05cfd7858} Threshold:{Var:Threshold Labels: Value:0xc05cfd7880} compare:{Var:compare Labels:aggregatedBy=sum, name=LIMA Query Value:0xc05cfd7810} sum:{Var:sum Labels:aggregatedBy=sum, name=LIMA Query Value:0xc05cfd7840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.754918146s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=1 ], [ var='compare' labels={aggregatedBy=sum, name=LIMA Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=LIMA Query} value=20 ]}]" duration=23.052282ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15df7545_987d_4bee_b8aa_d02c26b6e1e2.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=tv-mock-server-web-54689bf759-mfcwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.75551398Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=90284 slug=volantio t=2024-05-29T13:44:14.755466798Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.754939038Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.754923682Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f, namespace=development-proxy, 
node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.754856601Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.75484179Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.754725015Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.754641768Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.754296689Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.754486571Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.754198969Z caller=remote_instance_store.go:51 user=292865 slug=admin1671 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc95d93d_ea20_4da1_a49e_56dad7cb4286.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=cert-manager-webhook-7bfc7b8579-p8587, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.754068374Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=292865 slug=admin1671 version=11 fingerprint=e7928539da5ce80f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.753931721Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels: Value:0xc0227d2638} B1:{Var:B Labels: Value:0xc0227d2648}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.753511687s EvaluationString:[ var='B0' metric='Alarm Flag' labels={} value=1 ], [ var='B1' metric='ELCB' labels={} value=1 ]}]" duration=216.895381ms + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.75392296Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.753876742Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffa5689b_cfd5_4376_9cac_2718616c986c.slice/cri-containerd-bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-redis-6599b4bcbd-x7jvz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.753554508Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.753241187Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.753181733Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=applicationset-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice/cri-containerd-0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.753103249Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID721dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:14.752995581Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.869624ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, 
pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.752993686Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.752980075Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod431447fd_b1a9_40c2_b2e0_371e85ed0dd6.slice/cri-containerd-c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-server-5858655bdf-ckbj6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.752747277Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod431447fd_b1a9_40c2_b2e0_371e85ed0dd6.slice/cri-containerd-c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-server-5858655bdf-ckbj6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.752734631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=SRviRMnVz, ref_id=A" t=2024-05-29T13:44:14.752616449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode01fbad9_d8d5_45d8_8dbf_89072d54998d.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-dex-server-6446fdbf4b-pzk8c, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.752610256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=430961 slug=solifi version=2 fingerprint=92220d5f070ed3cd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.752545292Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=SRviRMnVz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.752272036s EvaluationString:}]" duration=33.266569ms + 
logger=ngalert.state.manager user=707575 slug=prod1themomproject instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.752468378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=argo-rollouts, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f.scope, image=quay.io/argoproj/argo-rollouts:v1.4.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet" t=2024-05-29T13:44:14.752273613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=707575 slug=prod1themomproject instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.752424693Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=707575 slug=prod1themomproject t=2024-05-29T13:44:14.752393385Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=309009 slug=elestyle version=1 fingerprint=61306dc640c08f41 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.738516758Z level=debug msg="Alert rule evaluated" results="[{Instance:container=argo-rollouts, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f.scope, image=quay.io/argoproj/argo-rollouts:v1.4.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=argo-rollouts, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f.scope, image=quay.io/argoproj/argo-rollouts:v1.4.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88aa88} C:{Var:C Labels:container=argo-rollouts, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f.scope, image=quay.io/argoproj/argo-rollouts:v1.4.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f, namespace=argo-rollouts, 
node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88a940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715048277s EvaluationString:[ var='B' labels={container=argo-rollouts, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f.scope, image=quay.io/argoproj/argo-rollouts:v1.4.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=2 ], [ var='C' labels={container=argo-rollouts, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f.scope, image=quay.io/argoproj/argo-rollouts:v1.4.1, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=08df6f58d91ca0000cacc70e3363eb97d211867ff6afc9dd8025e6dc3ac5073f, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88ac48} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84, namespace=argo-rollouts, 
node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88ad38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715079472s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=2 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8cc95bf_d54f_49f5_b151_9dd14d3f77da.slice/cri-containerd-9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9689fb34b9a7af7b151ae4ad04c3199193c06e7e1e0a8bdf01a1c646967d9f84, namespace=argo-rollouts, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=argo-rollouts-695f876754-wpvl9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode01fbad9_d8d5_45d8_8dbf_89072d54998d.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-dex-server-6446fdbf4b-pzk8c, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode01fbad9_d8d5_45d8_8dbf_89072d54998d.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-dex-server-6446fdbf4b-pzk8c, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88af08} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode01fbad9_d8d5_45d8_8dbf_89072d54998d.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-dex-server-6446fdbf4b-pzk8c, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88b078}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715094284s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode01fbad9_d8d5_45d8_8dbf_89072d54998d.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-dex-server-6446fdbf4b-pzk8c, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ 
var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode01fbad9_d8d5_45d8_8dbf_89072d54998d.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-dex-server-6446fdbf4b-pzk8c, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod431447fd_b1a9_40c2_b2e0_371e85ed0dd6.slice/cri-containerd-c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-server-5858655bdf-ckbj6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod431447fd_b1a9_40c2_b2e0_371e85ed0dd6.slice/cri-containerd-c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-server-5858655bdf-ckbj6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88b220} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod431447fd_b1a9_40c2_b2e0_371e85ed0dd6.slice/cri-containerd-c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-server-5858655bdf-ckbj6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88b310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715109422s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod431447fd_b1a9_40c2_b2e0_371e85ed0dd6.slice/cri-containerd-c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-server-5858655bdf-ckbj6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod431447fd_b1a9_40c2_b2e0_371e85ed0dd6.slice/cri-containerd-c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4.scope, 
image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c3af35f39f643a12dea27b84d147cede179f36c9e2383333fd1d95c8272804e4, namespace=argocd, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=argocd-server-5858655bdf-ckbj6, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4c9d6e1_9159_4355_a68d_03f903260611.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-application-controller-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4c9d6e1_9159_4355_a68d_03f903260611.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-application-controller-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88b490} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4c9d6e1_9159_4355_a68d_03f903260611.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-application-controller-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88b5d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715124142s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4c9d6e1_9159_4355_a68d_03f903260611.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-application-controller-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4c9d6e1_9159_4355_a68d_03f903260611.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-application-controller-0, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, 
node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88b790} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88b840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715135272s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=applicationset-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice/cri-containerd-0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=applicationset-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice/cri-containerd-0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88bba8} C:{Var:C Labels:container=applicationset-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice/cri-containerd-0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
name=0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88ba98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715145413s EvaluationString:[ var='B' labels={container=applicationset-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice/cri-containerd-0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={container=applicationset-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27d5d111_8870_4796_9dc5_6378b78d0984.slice/cri-containerd-0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-applicationset-controller-7cbf759f86-hpd4g, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88bd60} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01f88be28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715160154s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, 
node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=notifications-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice/cri-containerd-8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=notifications-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice/cri-containerd-8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b72028} C:{Var:C Labels:container=notifications-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice/cri-containerd-8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b720e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715171471s EvaluationString:[ var='B' labels={container=notifications-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice/cri-containerd-8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} 
value=4 ], [ var='C' labels={container=notifications-controller, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb077757c_3892_4e3c_aab4_fae751ed260e.slice/cri-containerd-8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-notifications-controller-6f594bddd9-2ld7b, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffa5689b_cfd5_4376_9cac_2718616c986c.slice/cri-containerd-bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-redis-6599b4bcbd-x7jvz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffa5689b_cfd5_4376_9cac_2718616c986c.slice/cri-containerd-bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-redis-6599b4bcbd-x7jvz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b72260} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffa5689b_cfd5_4376_9cac_2718616c986c.slice/cri-containerd-bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-redis-6599b4bcbd-x7jvz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b72310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715183952s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffa5689b_cfd5_4376_9cac_2718616c986c.slice/cri-containerd-bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-redis-6599b4bcbd-x7jvz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, 
endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffa5689b_cfd5_4376_9cac_2718616c986c.slice/cri-containerd-bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bc0404a0370ab6947f3b7c46daee614ebaf8e14938bf035e328712e1a20ac758, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-redis-6599b4bcbd-x7jvz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b728d0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b729a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715196994s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=cmp-tanka-secrets, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cmp-tanka-secrets, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b72b98} C:{Var:C Labels:container=cmp-tanka-secrets, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b72d10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715207427s EvaluationString:[ var='B' labels={container=cmp-tanka-secrets, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={container=cmp-tanka-secrets, ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=cmp-tanka, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cmp-tanka, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b72f18} C:{Var:C Labels:container=cmp-tanka, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b72ff0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715220057s EvaluationString:[ var='B' labels={container=cmp-tanka, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={container=cmp-tanka, ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51cd9b7d_4bba_4136_ba96_168c01135cd1.slice/cri-containerd-bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55.scope, image=quay.io/argoproj/argocd:v2.6.7, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=bbbf3d7113ad40962c2195e409f921a3d277b8a821f85f59bdd891ba28a6ad55, namespace=argocd, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=argocd-repo-server-5d75d855d-6tnp9, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc95d93d_ea20_4da1_a49e_56dad7cb4286.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=cert-manager-webhook-7bfc7b8579-p8587, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc95d93d_ea20_4da1_a49e_56dad7cb4286.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=cert-manager-webhook-7bfc7b8579-p8587, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73248} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc95d93d_ea20_4da1_a49e_56dad7cb4286.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=cert-manager-webhook-7bfc7b8579-p8587, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715229696s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc95d93d_ea20_4da1_a49e_56dad7cb4286.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=cert-manager-webhook-7bfc7b8579-p8587, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=6 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc95d93d_ea20_4da1_a49e_56dad7cb4286.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=cert-manager-webhook-7bfc7b8579-p8587, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73518} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715237669s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=2 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=cert-manager-cainjector, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice/cri-containerd-15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e.scope, image=quay.io/jetstack/cert-manager-cainjector:v1.13.3, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cert-manager-cainjector, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice/cri-containerd-15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e.scope, image=quay.io/jetstack/cert-manager-cainjector:v1.13.3, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b737e0} C:{Var:C Labels:container=cert-manager-cainjector, ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice/cri-containerd-15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e.scope, image=quay.io/jetstack/cert-manager-cainjector:v1.13.3, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715246654s EvaluationString:[ var='B' labels={container=cert-manager-cainjector, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice/cri-containerd-15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e.scope, image=quay.io/jetstack/cert-manager-cainjector:v1.13.3, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=2 ], [ var='C' labels={container=cert-manager-cainjector, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51500c0c_877a_4b32_ba7a_1a820a22daa9.slice/cri-containerd-15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e.scope, image=quay.io/jetstack/cert-manager-cainjector:v1.13.3, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e, namespace=cert-manager, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=cert-manager-cainjector-9cb894775-l2xxd, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73a60} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.71525603s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=54 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice/cri-containerd-5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice/cri-containerd-5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73d00} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice/cri-containerd-5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73e18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715262262s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice/cri-containerd-5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=54 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce80d7e6_ce82_4840_9289_119260ecc966.slice/cri-containerd-5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd, namespace=development-proxy, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=shadowsocks-5cd87d76c5-wlwhf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc002b73f88} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715269761s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f.scope, 
image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a1a0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715278654s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=c37ed2ef672f557e1d65c88e696f87a9cc116b83ef1074dab0c60a5e075a9b5f, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=frpc, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814.scope, image=docker.io/library/ubuntu:latest, 
instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=frpc, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814.scope, image=docker.io/library/ubuntu:latest, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a3e0} C:{Var:C Labels:container=frpc, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814.scope, image=docker.io/library/ubuntu:latest, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a4b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715287617s EvaluationString:[ var='B' labels={container=frpc, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814.scope, image=docker.io/library/ubuntu:latest, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={container=frpc, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf27ac39_99b1_4ac5_82fb_8c69380265b9.slice/cri-containerd-e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814.scope, image=docker.io/library/ubuntu:latest, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e6b35afb8cae90d761e68859d7cadac5a3d4950e530117cba253a10713dcf814, namespace=development-proxy, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=frpc-887d78d45-27tjs, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a630} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a6f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715296679s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=7 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=connect-simulator-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/connect-simulator-v2:59d44899762d6751e90c56ecc036412e81db7def, 
instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=connect-simulator-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/connect-simulator-v2:59d44899762d6751e90c56ecc036412e81db7def, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a878} C:{Var:C Labels:container=connect-simulator-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/connect-simulator-v2:59d44899762d6751e90c56ecc036412e81db7def, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18a958}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.7153079s EvaluationString:[ var='B' labels={container=connect-simulator-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/connect-simulator-v2:59d44899762d6751e90c56ecc036412e81db7def, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=7 ], [ var='C' labels={container=connect-simulator-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37d962c3_c45e_4c54_b509_f82de062a5ba.slice/cri-containerd-de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/connect-simulator-v2:59d44899762d6751e90c56ecc036412e81db7def, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=connect-simulator-v2-69c98b86bc-m5gbx, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} 
{Instance:container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a1cedce_4e05_4203_8694_87c3712cbce0.slice/cri-containerd-9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-pfv8j, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a1cedce_4e05_4203_8694_87c3712cbce0.slice/cri-containerd-9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-pfv8j, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18ab10} C:{Var:C Labels:container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a1cedce_4e05_4203_8694_87c3712cbce0.slice/cri-containerd-9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-pfv8j, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18abf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71531951s EvaluationString:[ var='B' labels={container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a1cedce_4e05_4203_8694_87c3712cbce0.slice/cri-containerd-9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-pfv8j, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=7 ], [ var='C' labels={container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a1cedce_4e05_4203_8694_87c3712cbce0.slice/cri-containerd-9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-pfv8j, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15df7545_987d_4bee_b8aa_d02c26b6e1e2.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=tv-mock-server-web-54689bf759-mfcwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15df7545_987d_4bee_b8aa_d02c26b6e1e2.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=tv-mock-server-web-54689bf759-mfcwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18ad60} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15df7545_987d_4bee_b8aa_d02c26b6e1e2.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=tv-mock-server-web-54689bf759-mfcwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18ae20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715332082s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15df7545_987d_4bee_b8aa_d02c26b6e1e2.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=tv-mock-server-web-54689bf759-mfcwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=7 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15df7545_987d_4bee_b8aa_d02c26b6e1e2.slice, instance=10.100.40.95:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-40-95.ap-northeast-1.compute.internal, pod=tv-mock-server-web-54689bf759-mfcwk, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18afc0} C:{Var:C Labels:ele_env=stg, 
endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18b080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715341884s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=6 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=ele-send-admin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice/cri-containerd-8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-admin:c2d4c0c98d699e8345e82db15ad6457f43f1986f, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=ele-send-admin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice/cri-containerd-8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-admin:c2d4c0c98d699e8345e82db15ad6457f43f1986f, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18b2a0} C:{Var:C Labels:container=ele-send-admin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice/cri-containerd-8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-admin:c2d4c0c98d699e8345e82db15ad6457f43f1986f, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18b380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715351918s 
EvaluationString:[ var='B' labels={container=ele-send-admin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice/cri-containerd-8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-admin:c2d4c0c98d699e8345e82db15ad6457f43f1986f, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=6 ], [ var='C' labels={container=ele-send-admin, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb4597d1d_c38b_46ed_8c5f_d50b3a3b42de.slice/cri-containerd-8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-admin:c2d4c0c98d699e8345e82db15ad6457f43f1986f, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8fe787d284e1c3846b0a3c1639abb5781df5df7dc18acaf49c671068569e2217, namespace=ele-toolbox, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-8gcpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18b4f8} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18b5a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715364652s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=2 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, 
prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice/cri-containerd-44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice/cri-containerd-44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18b728} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice/cri-containerd-44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18b7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715376119s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice/cri-containerd-44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=2 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podadb2eacc_f64a_481a_b576_48f290129503.slice/cri-containerd-44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.46.71:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=44a08b574fcdb823ceed67cff1c70f17e2ea18097d9f8147b3416f90ccf9a6f7, namespace=ele-toolbox, node=ip-10-100-46-71.ap-northeast-1.compute.internal, pod=ele-send-admin-5886c58fd7-bx74s, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice, 
instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18ba10} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18baa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715388583s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet 
Value:0xc01b18bc68} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18bd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715398619s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, 
prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18bf00} C:{Var:C Labels:container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc01b18bfe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715409107s EvaluationString:[ var='B' labels={container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={container=ele-send-v2, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0c7e7f8_3c58_4a25_b7c6_bac9e34a5beb.slice/cri-containerd-63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-send-v2:3f854b6222fef3b4da0e3ab4d0f2c0741d12b473, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=63603b3f8c77f2c061b94a4fbf12edf0e2963be3e38b3322f11d3b427c86e506, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=ele-send-v2-6797bcf994-xmmzl, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=shlink, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12e48e1c_070e_4b37_8515_e19a1d76cbd0.slice/cri-containerd-d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306.scope, image=docker.io/shlinkio/shlink:stable, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=shlink-944bcbcc-8vwlc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=shlink, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12e48e1c_070e_4b37_8515_e19a1d76cbd0.slice/cri-containerd-d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306.scope, image=docker.io/shlinkio/shlink:stable, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306, 
namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=shlink-944bcbcc-8vwlc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154ce458} C:{Var:C Labels:container=shlink, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12e48e1c_070e_4b37_8515_e19a1d76cbd0.slice/cri-containerd-d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306.scope, image=docker.io/shlinkio/shlink:stable, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=shlink-944bcbcc-8vwlc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154ce5b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715420577s EvaluationString:[ var='B' labels={container=shlink, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12e48e1c_070e_4b37_8515_e19a1d76cbd0.slice/cri-containerd-d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306.scope, image=docker.io/shlinkio/shlink:stable, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=shlink-944bcbcc-8vwlc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=3 ], [ var='C' labels={container=shlink, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12e48e1c_070e_4b37_8515_e19a1d76cbd0.slice/cri-containerd-d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306.scope, image=docker.io/shlinkio/shlink:stable, instance=10.100.48.211:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306, namespace=ele-toolbox, node=ip-10-100-48-211.ap-northeast-1.compute.internal, pod=shlink-944bcbcc-8vwlc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod6605ede6_dcbe_4b32_b7da_36f0624df496.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-master-56b547f46c-jmz2m, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod6605ede6_dcbe_4b32_b7da_36f0624df496.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-master-56b547f46c-jmz2m, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154ce910} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod6605ede6_dcbe_4b32_b7da_36f0624df496.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-master-56b547f46c-jmz2m, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154ceae8}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:4.715430621s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod6605ede6_dcbe_4b32_b7da_36f0624df496.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-master-56b547f46c-jmz2m, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod6605ede6_dcbe_4b32_b7da_36f0624df496.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-master-56b547f46c-jmz2m, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod022efa11_2f7b_42b2_adbf_ba733e8e2c6c.slice/cri-containerd-168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-send-746548dd8-hd6sr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod022efa11_2f7b_42b2_adbf_ba733e8e2c6c.slice/cri-containerd-168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-send-746548dd8-hd6sr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cec90} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod022efa11_2f7b_42b2_adbf_ba733e8e2c6c.slice/cri-containerd-168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-send-746548dd8-hd6sr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154ced60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715437816s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod022efa11_2f7b_42b2_adbf_ba733e8e2c6c.slice/cri-containerd-168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-send-746548dd8-hd6sr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' 
labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod022efa11_2f7b_42b2_adbf_ba733e8e2c6c.slice/cri-containerd-168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6, namespace=ele-toolbox, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-send-746548dd8-hd6sr, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db5f1d1_a396_49c8_b09d_d09a2b43ae54.slice/cri-containerd-f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-tq7dt, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db5f1d1_a396_49c8_b09d_d09a2b43ae54.slice/cri-containerd-f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-tq7dt, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154ceee0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db5f1d1_a396_49c8_b09d_d09a2b43ae54.slice/cri-containerd-f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-tq7dt, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cefa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715445223s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db5f1d1_a396_49c8_b09d_d09a2b43ae54.slice/cri-containerd-f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-tq7dt, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=54 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, 
id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0db5f1d1_a396_49c8_b09d_d09a2b43ae54.slice/cri-containerd-f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=f5afa8262eb4cddf5acaa33392d79e798fb3287222126e343672cf8e9d0909db, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=elepay-sys-v2-5f4b8d8f74-tq7dt, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cf270} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cf348}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715451739s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=54 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice/cri-containerd-67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, 
prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice/cri-containerd-67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cf568} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice/cri-containerd-67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cf620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715460032s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice/cri-containerd-67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=54 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c0efc5b_ddeb_4f51_986a_7374034ee09f.slice/cri-containerd-67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.42.89:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a, namespace=elepay, node=ip-10-100-42-89.ap-northeast-1.compute.internal, pod=smart-boss-web-56db8c9b7f-dg6jm, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, 
prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cf890} C:{Var:C Labels:container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cf980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715467918s EvaluationString:[ var='B' labels={container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=6 ], [ var='C' labels={container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=6539f153a024c52ef2acfd7e1e931762d1b27cbd669586577d23185dc36275d8, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, 
metrics_path=/metrics/cadvisor, name=a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cfc70} C:{Var:C Labels:container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0154cfd48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715476685s EvaluationString:[ var='B' labels={container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=6 ], [ var='C' labels={container=linkerd-proxy, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b97dcd_b371_40a3_8ffc_65702ce4dda0.slice/cri-containerd-a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e.scope, image=cr.l5d.io/linkerd/proxy:stable-2.14.4, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=a00685f81eb871b17e252c2904d2bdb1e2103a2c0eb3b0237955f7ed5c6fc89e, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-5ptpj, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod48bd39b8_7cd2_4e91_994e_cf131780100a.slice, instance=10.100.46.153:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-46-153.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-pbwtf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] 
[Truncated Grafana alerting state log omitted: a single rule-evaluation record (EvaluatedAt 2024-05-29 13:44:10 UTC, evaluation duration ~4.7 s) dumping per-instance results in State:Normal for kubelet /metrics/cadvisor series (job=kubelet, namespace=elepay, prometheus=monitoring/k8s), one entry per pod or container, each repeating the full label set with query values var='B' and var='C'.]
endpoint=https-metrics, id=/kubepods.slice/kubepods-pod5434cbc5_5792_4398_b291_e61e6a965d6a.slice/cri-containerd-e0c5b38956d8838b77128ee65da0c3ec6497b844e54ea7f36f3b4bc9e066c140.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-easyqr-web:76c07442c3d22bf23b022fbc682c048b405d2a1e, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e0c5b38956d8838b77128ee65da0c3ec6497b844e54ea7f36f3b4bc9e066c140, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=easyqr-web-6c686bd948-w766z, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27080} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d26fb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715719331s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, 
prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27220} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d272e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715726042s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=02509e284a3444f95c647db8637ce12045a947af30ad298a9d73804f3a24df06, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, 
prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27490} C:{Var:C Labels:container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27568}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715732974s EvaluationString:[ var='B' labels={container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={container=ele-dispatcher, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce314567_d66d_4b0b_a589_ab58366a21f6.slice/cri-containerd-12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-ele-dispatcher:2399e18d8f35152e61ae33d1f820bbf6d02ab213, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=12757bb635464d8b6b2e3134e85862d614499ea5a6823e10d78cb757af36111e, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=ele-dispatcher-8599d4446c-pxcbq, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, 
prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d276c0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71574029s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d278e0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
name=2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d279a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715746915s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2a5597f4e6abdcde5e9ccc24565c5a7842fb3d01f9d58ace228896669d0f29c8, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=elepay-admin-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-admin-web:f915d2ee1324500ccc34d13fa4e6e5fa0f2321d1, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=elepay-admin-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-admin-web:f915d2ee1324500ccc34d13fa4e6e5fa0f2321d1, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27b50} C:{Var:C Labels:container=elepay-admin-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-admin-web:f915d2ee1324500ccc34d13fa4e6e5fa0f2321d1, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, 
name=d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27c28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715756893s EvaluationString:[ var='B' labels={container=elepay-admin-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-admin-web:f915d2ee1324500ccc34d13fa4e6e5fa0f2321d1, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={container=elepay-admin-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod13574a3d_1aef_4847_b784_822d2bee7e25.slice/cri-containerd-d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-admin-web:f915d2ee1324500ccc34d13fa4e6e5fa0f2321d1, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d1abf3881d2552ad60a4a2f4bb44ecc96a5f54ab857373b1ad4a14d587caf838, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-admin-web-7df65cbbcb-tdlzg, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27d98} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27e38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715764978s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} 
value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice/cri-containerd-580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice/cri-containerd-580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc019d27fb0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice/cri-containerd-580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc017524340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715771087s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice/cri-containerd-580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-podb69b84e0_a5ba_4dab_8e5c_59fe2f782449.slice/cri-containerd-580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df, namespace=elepay, 
node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-bigscreen-5f65bc55dd-q2mpb, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0175246c0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc017524860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71577839s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=elepay-business-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-business-web:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=elepay-business-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-business-web:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.50.226:10250, job=kubelet, 
metrics_path=/metrics/cadvisor, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc017525778} C:{Var:C Labels:container=elepay-business-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-business-web:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc0175254b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71578465s EvaluationString:[ var='B' labels={container=elepay-business-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-business-web:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={container=elepay-business-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-elepay-business-web:ee0fdcc7c4d968f05d7f5937fdbdf12b66ba215f, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, 
instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc017525b68} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc017525930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715792964s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod82a9dd53_4c9c_4eae_8674_c245f021a87e.slice/cri-containerd-e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=elepay-business-web-576b954774-4fcpc, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2cc060} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, 
prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2cc130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715800492s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2cc2d0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2cc3a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715805674s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, 
pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=2e905abbd32362fd776ab7adc396c9106f657abcb0fa1db0909dc48a375b7814, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:container=oneqr-kds-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-kds-web:24766930eeb1d1b9930b37273004c6de759014bb, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=oneqr-kds-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-kds-web:24766930eeb1d1b9930b37273004c6de759014bb, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2cc690} C:{Var:C Labels:container=oneqr-kds-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-kds-web:24766930eeb1d1b9930b37273004c6de759014bb, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2cc810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715813241s EvaluationString:[ var='B' labels={container=oneqr-kds-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-kds-web:24766930eeb1d1b9930b37273004c6de759014bb, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66, namespace=elepay, 
node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={container=oneqr-kds-web, ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod7560659c_394d_4296_b94b_a8e43da0746f.slice/cri-containerd-33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66.scope, image=886471755268.dkr.ecr.ap-northeast-1.amazonaws.com/stg-oneqr-kds-web:24766930eeb1d1b9930b37273004c6de759014bb, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-kds-web-74d7cc5fcf-crxkv, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod65d4ba3f_37a0_474a_80f1_a0541d82a5be.slice/cri-containerd-d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-pos-web-7dccbbb488-26zrz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod65d4ba3f_37a0_474a_80f1_a0541d82a5be.slice/cri-containerd-d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-pos-web-7dccbbb488-26zrz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2ccae0} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod65d4ba3f_37a0_474a_80f1_a0541d82a5be.slice/cri-containerd-d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-pos-web-7dccbbb488-26zrz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2cc9f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715823224s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod65d4ba3f_37a0_474a_80f1_a0541d82a5be.slice/cri-containerd-d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-pos-web-7dccbbb488-26zrz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ 
var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-pod65d4ba3f_37a0_474a_80f1_a0541d82a5be.slice/cri-containerd-d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=d7f8d7c2bb3db3a6d0931f9f9854e4adcc04e6dc186ea443c02c0a1acbef5bb5, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-pos-web-7dccbbb488-26zrz, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2ccc70} C:{Var:C Labels:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet Value:0xc00d2ccd48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715830194s EvaluationString:[ var='B' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=4 ], [ var='C' labels={ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, prometheus=monitoring/k8s, prometheus_replica=prometheus-k8s-0, service=kubelet} value=0 ]} {Instance:ele_env=stg, endpoint=https-metrics, id=/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f1e496e_b4bd_4835_80ba_0846877db8bb.slice/cri-containerd-3b7b9eaccfb4ba7923e7da5c0a201fcbe70973ab4aec0cc6559c995f4472a3e7.scope, image=602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/eks/pause:3.5, instance=10.100.50.226:10250, job=kubelet, metrics_path=/metrics/cadvisor, name=3b7b9eaccfb4ba7923e7da5c0a201fcbe70973ab4aec0cc6559c995f4472a3e7, namespace=elepay, node=ip-10-100-50-226.ap-northeast-1.compute.internal, pod=oneqr-shop-web-7c4546445f-km6hf, 
[... several thousand added lines of Grafana Alerting (ngalert) log fixture data, all timestamped 2024-05-29T13:44:14Z: debug/info entries covering alert rule evaluations (State:Normal / State:NoData) with full cAdvisor/kubelet label sets from an EKS cluster, state-manager transitions ("Setting next state", "Keeping state"), state persistence ("Saving alert states", "calling SaveAlertInstance"), and alert delivery ("sending alerts to grafana") across many tenant slugs ...]
evaluation results" resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-336213laio1use1, cloud_platform=AWS, customer_id=C694, env_id=336213, env_name=C694_PARALLEL_PROD, env_type=prod, instance=env-336213laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:14.729781892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=last_predict, instance=ftr, job=daily_inProgress_predict" t=2024-05-29T13:44:14.729639611Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-336102laio1canadacentral, cloud_platform=Azure, customer_id=A215, env_id=336102, env_name=A215 FCL Dev, env_type=dev, instance=env-336102laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=preprod" t=2024-05-29T13:44:14.729613744Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-336101laio1canadacentral, cloud_platform=Azure, customer_id=A215, env_id=336101, env_name=A215 FCL Prod, env_type=prod, instance=env-336101laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=preprod" t=2024-05-29T13:44:14.729429475Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-336101laio1canadacentral, cloud_platform=Azure, customer_id=A215, env_id=336101, env_name=A215 FCL Prod, env_type=prod, instance=env-336101laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=preprod" t=2024-05-29T13:44:14.729411991Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=last_predict, instance=acibadem-1, job=daily_inProgress_predict" t=2024-05-29T13:44:14.729226909Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.729253771Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-336066laio1use1, cloud_platform=AWS, customer_id=C593, env_id=336066, env_name=C593_TASC_PROD, env_type=prod, instance=env-336066laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:14.729194294Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.728721812Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.728580049Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.728495667Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.728405578Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.728373062Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager 
user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335923laio1euc1, cloud_platform=AWS, customer_id=C501, env_id=335923, env_name=C501_PARALLEL_DEV, env_type=dev, instance=env-335923laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=testing" t=2024-05-29T13:44:14.728255229Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.727932783Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335763laio1use1, cloud_platform=AWS, customer_id=C593, env_id=335763, env_name=C593_TASC_Dev, env_type=dev, instance=env-335763laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.727862065Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=608555 slug=ias instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.727842347Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.727673069Z caller=remote_instance_store.go:51 user=233137 slug=mirrornode msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=233137 slug=mirrornode instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.727630096Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335691laio1euc1, cloud_platform=AWS, customer_id=C802, env_id=335691, env_name=C802 Adidas LocalBI PROD, env_type=prod, instance=env-335691laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.727494076Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335691laio1euc1, cloud_platform=AWS, customer_id=C802, env_id=335691, env_name=C802 Adidas LocalBI PROD, env_type=prod, instance=env-335691laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.727480631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.72739013Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=245291 slug=pismo version=45 fingerprint=46d6c7bc7d8a5e8a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.727296861Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.727017059s EvaluationString:}]" duration=207.276627ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335637laio1switzerlandnorth, cloud_platform=Azure, customer_id=A237, env_id=335637, env_name=A237_Marti_Prod, env_type=prod, instance=env-335637laio1switzerlandnorth, job=integrations/node_exporter, region=SwitzerlandNorth, stage=preprod" t=2024-05-29T13:44:14.727343092Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335636laio1switzerlandnorth, 
cloud_platform=Azure, customer_id=A237, env_id=335636, env_name=A237_Marti_Dev_New, env_type=dev, instance=env-335636laio1switzerlandnorth, job=integrations/node_exporter, region=SwitzerlandNorth, stage=preprod" t=2024-05-29T13:44:14.727241692Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.727187022Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.727144585Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.727042441Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.726992178Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=733461 slug=lattice instance="instance=localhost:7400, job=sequencer-0, network=redstone" t=2024-05-29T13:44:14.72706514Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.726975297Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=733461 slug=lattice instance="instance=localhost:7400, job=sequencer-0, network=redstone" t=2024-05-29T13:44:14.727054573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=733461 slug=lattice t=2024-05-29T13:44:14.727024726Z level=debug msg="State manager processing evaluation results" resultCount=2 + level=debug ts=2024-05-29T13:44:14.726958636Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335596laio1eus2, cloud_platform=AWS, customer_id=C784, env_id=335596, env_name=C784_RSI_PreProd4, env_type=prod, instance=env-335596laio1eus2, job=integrations/node_exporter, region=eu-south-2, stage=preprod" t=2024-05-29T13:44:14.726915191Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.726596504Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.726645013Z caller=grafana.go:247 user=336655 slug=odigeoconnect msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=f492ee03-454a-42f6-95f6-16e60b7b05bf" groups=0 alerts=0 + level=debug ts=2024-05-29T13:44:14.726512888Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335427laio1aps2, cloud_platform=AWS, customer_id=C801, env_id=335427, env_name=C801_Nostradata_Prod, env_type=prod, instance=env-335427laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=preprod" t=2024-05-29T13:44:14.726585806Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.726432811Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.726428969Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.726333915Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering 
msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.726378303Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.72624222Z caller=manager.go:153 user=555781 slug=danelec msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=20177 slug=paddledash instance="QueueName=paddle-staging-notification-service-low-dlq" t=2024-05-29T13:44:14.726090783Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.726012425Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.725927294Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335354laio1use1, cloud_platform=AWS, customer_id=C595, env_id=335354, env_name=C595 PARALLEL PROD, env_type=prod, instance=env-335354laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.726034691Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.725941316Z caller=manager.go:153 user=537600 slug=oneytrustpoc msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:14.725778108Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.725834136Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.72577657Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.725466758Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-335182laio1euc1, cloud_platform=AWS, customer_id=C760, env_id=335182, env_name=C760 Lamborghini Prod New, env_type=prod, instance=env-335182laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.725587273Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.725451278Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.315086ms + level=debug ts=2024-05-29T13:44:14.725121003Z caller=manager.go:153 user=740293 slug=sopamo msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:14.72488096Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.724656639Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.724538434Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.724495343Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334975laio1euc1, cloud_platform=AWS, customer_id=C757, env_id=334975, 
env_name=C757 Adidas GlobalBI Dev, env_type=dev, instance=env-334975laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.72452963Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=car-cron-worker, pod=car-cron-worker-5fb6cb5cc-9c5ht" t=2024-05-29T13:44:14.724456495Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.724496394Z caller=manager.go:153 user=511557 slug=freightdog msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.72439678Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:14.724349326Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334974laio2euc1, cloud_platform=AWS, customer_id=C757, env_id=334974, env_name=C757 Adidas GlobalBI Prod, env_type=prod, instance=env-334974laio2euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.724293278Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334974laio1euc1, cloud_platform=AWS, customer_id=C757, env_id=334974, env_name=C757 Adidas GlobalBI Prod, env_type=prod, instance=env-334974laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.724134821Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.723893958Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334801laio1use1, cloud_platform=AWS, customer_id=C797, env_id=334801, env_name=C797 Cox nVision PRD, env_type=prod, instance=env-334801laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.723942624Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.723806422Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.723743206Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.723799783Z caller=manager.go:153 user=674468 slug=transithubstage msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334729laioapn1, cloud_platform=AWS, customer_id=C787, env_id=334729, env_name=C787_JEOL_prod_new, env_type=prod, instance=env-334729laioapn1, job=integrations/node_exporter, region=ap-northeast-1, stage=testing" t=2024-05-29T13:44:14.723789349Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.723649028Z caller=remote_instance_store.go:51 user=213445 slug=gan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.723658748Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.723681181Z caller=manager.go:153 user=757626 slug=hmgroup msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334575laio1use1, cloud_platform=AWS, customer_id=C798, env_id=334575, env_name=C798 Payway PROD, env_type=prod, instance=env-334575laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.723614806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334574laio1use1, cloud_platform=AWS, customer_id=C798, env_id=334574, env_name=C798 Payway DEV, env_type=dev, instance=env-334574laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.723424077Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334542laio1use1, cloud_platform=AWS, customer_id=C798, env_id=334542, env_name=c798 RH Dev, env_type=dev, instance=env-334542laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.723240071Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=operators, test_override=uid_pods" t=2024-05-29T13:44:14.723196542Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.723212674Z caller=manager.go:153 user=765052 slug=joraas msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=partners, test_override=uid_pods" t=2024-05-29T13:44:14.723149345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=site, test_override=uid_pods" t=2024-05-29T13:44:14.723067924Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=site, test_override=uid_pods" t=2024-05-29T13:44:14.723050134Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.723009171Z caller=manager.go:153 user=732381 slug=l337direct msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:14.722963126Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, 
store=client_side_keypairs, test_override=uid_pods" t=2024-05-29T13:44:14.722866967Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334538laio1eastus, cloud_platform=Azure, customer_id=A250, env_id=334538, env_name=A250 Fresh Market DEV new, env_type=dev, instance=env-334538laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.72266269Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keysetkey, test_override=uid_pods" t=2024-05-29T13:44:14.722670214Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334538laio1eastus, cloud_platform=Azure, customer_id=A250, env_id=334538, env_name=A250 Fresh Market DEV new, env_type=dev, instance=env-334538laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.722643949Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keysetkey, test_override=uid_pods" t=2024-05-29T13:44:14.722650883Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keyset, test_override=uid_pods" t=2024-05-29T13:44:14.722616761Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=auth, test_override=uid_pods" t=2024-05-29T13:44:14.722517905Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=site, test_override=uid_pods" t=2024-05-29T13:44:14.722465398Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=430961 slug=solifi version=3 fingerprint=817a92a77c7bba98 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.722350298Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.72203536s EvaluationString:}]" duration=84.784918ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334531laio1southeastasia, cloud_platform=Azure, customer_id=A228, env_id=334531, env_name=A228 DFS Prod, env_type=prod, instance=env-334531laio1southeastasia, job=integrations/node_exporter, region=southeastasia, 
stage=testing" t=2024-05-29T13:44:14.722260485Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.72227536Z caller=manager.go:153 user=485797 slug=gconnect msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334531laio1southeastasia, cloud_platform=Azure, customer_id=A228, env_id=334531, env_name=A228 DFS Prod, env_type=prod, instance=env-334531laio1southeastasia, job=integrations/node_exporter, region=southeastasia, stage=testing" t=2024-05-29T13:44:14.722244159Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=site, test_override=uid_pods" t=2024-05-29T13:44:14.722164861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=salt, test_override=uid_pods" t=2024-05-29T13:44:14.722116461Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keysetkey, test_override=uid_pods" t=2024-05-29T13:44:14.722094738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keysetkey, test_override=uid_pods" t=2024-05-29T13:44:14.722082644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334522laio2eastus, cloud_platform=Azure, customer_id=A250, env_id=334522, env_name=A250 Fresh Market Prod, env_type=prod, instance=env-334522laio2eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.722052062Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=client_side_keypairs, test_override=uid_pods" t=2024-05-29T13:44:14.721965223Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=salt, test_override=uid_pods" t=2024-05-29T13:44:14.721833831Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.721804053Z caller=manager.go:153 user=705918 slug=vilea msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager 
user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keysetkey, test_override=uid_pods" t=2024-05-29T13:44:14.721779251Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keyset, test_override=uid_pods" t=2024-05-29T13:44:14.721728823Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keyset, test_override=uid_pods" t=2024-05-29T13:44:14.721713091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=client_side_keypairs, test_override=uid_pods" t=2024-05-29T13:44:14.721679609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=client_side_keypairs, test_override=uid_pods" t=2024-05-29T13:44:14.721666418Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.72166845Z caller=manager.go:153 user=671211 slug=satelliet msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=auth, test_override=uid_pods" t=2024-05-29T13:44:14.721633552Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334513laio1eus2, cloud_platform=AWS, customer_id=C788, env_id=334513, env_name=c788_Tendam_dev, env_type=dev, instance=env-334513laio1eus2, job=integrations/node_exporter, region=eu-south-2, stage=testing" t=2024-05-29T13:44:14.721575228Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=enclaves, test_override=uid_pods" t=2024-05-29T13:44:14.721510507Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=enclaves, test_override=uid_pods" t=2024-05-29T13:44:14.721437986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid 
instance="application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=enclaves, test_override=uid_pods" t=2024-05-29T13:44:14.721427089Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=operators, test_override=uid_pods" t=2024-05-29T13:44:14.721403517Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=enclaves, test_override=uid_pods" t=2024-05-29T13:44:14.721370058Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=873368 slug=euid version=42 fingerprint=46663d73faa3b90d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.720619602Z level=debug msg="Alert rule evaluated" results="[{Instance:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=enclaves, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=enclaves, test_override=uid_pods Value:0xc003d94868} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=enclaves, test_override=uid_pods Value:0xc003d948f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71786212s EvaluationString:[ var='QUERY' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=enclaves, test_override=uid_pods} value=14.23728813559322 ], [ var='THRESHOLD' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=enclaves, test_override=uid_pods} value=0 ]} {Instance:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=operators, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=operators, test_override=uid_pods Value:0xc003d94ce8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=operators, test_override=uid_pods Value:0xc003d94db0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717873119s EvaluationString:[ var='QUERY' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, 
instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=operators, test_override=uid_pods} value=14.23728813559322 ], [ var='THRESHOLD' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.27.176:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-js257, store=operators, test_override=uid_pods} value=0 ]} {Instance:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=enclaves, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=enclaves, test_override=uid_pods Value:0xc003d94ff8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=enclaves, test_override=uid_pods Value:0xc003d95110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717880507s EvaluationString:[ var='QUERY' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=enclaves, test_override=uid_pods} value=15.254237288135592 ], [ var='THRESHOLD' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=enclaves, test_override=uid_pods} value=0 ]} {Instance:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=operators, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=operators, test_override=uid_pods Value:0xc003d952a0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=operators, test_override=uid_pods Value:0xc003d95410}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717887258s EvaluationString:[ var='QUERY' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=operators, test_override=uid_pods} value=15.254237288135592 ], [ var='THRESHOLD' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.30.247:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-tvw8b, store=operators, test_override=uid_pods} value=0 ]} {Instance:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=enclaves, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, 
pod=euid-core-prod-5b5d8cc78d-b65jn, store=enclaves, test_override=uid_pods Value:0xc003d955f8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=enclaves, test_override=uid_pods Value:0xc003d956d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717893443s EvaluationString:[ var='QUERY' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=enclaves, test_override=uid_pods} value=15.254237288135592 ], [ var='THRESHOLD' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=enclaves, test_override=uid_pods} value=0 ]} {Instance:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=operators, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=operators, test_override=uid_pods Value:0xc003d95830} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=operators, test_override=uid_pods Value:0xc003d95d58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717900841s EvaluationString:[ var='QUERY' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=operators, test_override=uid_pods} value=15.254237288135592 ], [ var='THRESHOLD' labels={application=uid2-core, cluster=euid-prod, container=euid-core, env=prod, instance=10.212.31.94:9088, job=uid_pods, ns=core-prod, pod=euid-core-prod-5b5d8cc78d-b65jn, store=operators, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=auth, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=auth, test_override=uid_pods Value:0xc003034050} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=auth, test_override=uid_pods Value:0xc003034128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717906992s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=auth, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, 
container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=auth, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=client_side_keypairs, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=client_side_keypairs, test_override=uid_pods Value:0xc003034310} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=client_side_keypairs, test_override=uid_pods Value:0xc0030343f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717913721s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=client_side_keypairs, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=client_side_keypairs, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keyset, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keyset, test_override=uid_pods Value:0xc003034640} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keyset, test_override=uid_pods Value:0xc003034570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717920215s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keyset, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keyset, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keysetkey, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, 
ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keysetkey, test_override=uid_pods Value:0xc0030347d0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keysetkey, test_override=uid_pods Value:0xc0030348a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717926169s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keysetkey, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=keysetkey, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=salt, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=salt, test_override=uid_pods Value:0xc003034b00} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=salt, test_override=uid_pods Value:0xc003034a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717946682s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=salt, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=salt, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=site, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=site, test_override=uid_pods Value:0xc003034c80} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=site, test_override=uid_pods Value:0xc003034d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71795306s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=site, 
test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.26.189:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-cl4fd, store=site, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=auth, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=auth, test_override=uid_pods Value:0xc003034ef8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=auth, test_override=uid_pods Value:0xc003034fc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717960347s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=auth, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=auth, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=client_side_keypairs, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=client_side_keypairs, test_override=uid_pods Value:0xc003035130} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=client_side_keypairs, test_override=uid_pods Value:0xc003035200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717966421s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=client_side_keypairs, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=client_side_keypairs, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keyset, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, 
cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keyset, test_override=uid_pods Value:0xc003035348} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keyset, test_override=uid_pods Value:0xc003035418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717974146s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keyset, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keyset, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keysetkey, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keysetkey, test_override=uid_pods Value:0xc0030355a8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keysetkey, test_override=uid_pods Value:0xc003035678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717979542s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keysetkey, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=keysetkey, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=salt, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=salt, test_override=uid_pods Value:0xc0030357e0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=salt, test_override=uid_pods Value:0xc0030358c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717985278s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, 
instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=salt, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=salt, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=site, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=site, test_override=uid_pods Value:0xc003035aa0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=site, test_override=uid_pods Value:0xc003035b80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717990915s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=site, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.27.207:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-4jkw2, store=site, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=auth, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=auth, test_override=uid_pods Value:0xc003035d98} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=auth, test_override=uid_pods Value:0xc003035d00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.717996921s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=auth, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=auth, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=client_side_keypairs, test_override=uid_pods State:Normal Error: Results:map[] 
Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=client_side_keypairs, test_override=uid_pods Value:0xc003035fd8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=client_side_keypairs, test_override=uid_pods Value:0xc003035f20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71800457s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=client_side_keypairs, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=client_side_keypairs, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keyset, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keyset, test_override=uid_pods Value:0xc00701a250} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keyset, test_override=uid_pods Value:0xc00701a178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718011339s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keyset, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keyset, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keysetkey, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keysetkey, test_override=uid_pods Value:0xc00701a3e0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keysetkey, test_override=uid_pods Value:0xc00701a4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71801699s 
EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keysetkey, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=keysetkey, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=salt, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=salt, test_override=uid_pods Value:0xc00701a630} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=salt, test_override=uid_pods Value:0xc00701a708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71802278s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=salt, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=salt, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=site, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=site, test_override=uid_pods Value:0xc00701a938} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=site, test_override=uid_pods Value:0xc00701a880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718028044s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=site, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.130:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-89j6g, store=site, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, 
pod=euid-operator-prod-75b65c6b7b-kxljb, store=auth, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=auth, test_override=uid_pods Value:0xc00701aaa0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=auth, test_override=uid_pods Value:0xc00701ab70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718033478s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=auth, test_override=uid_pods} value=90 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=auth, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=client_side_keypairs, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=client_side_keypairs, test_override=uid_pods Value:0xc00701ace0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=client_side_keypairs, test_override=uid_pods Value:0xc00701adb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718038526s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=client_side_keypairs, test_override=uid_pods} value=90 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=client_side_keypairs, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keyset, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keyset, test_override=uid_pods Value:0xc00701af30} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keyset, test_override=uid_pods Value:0xc00701b028}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718044553s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keyset, test_override=uid_pods} value=90 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keyset, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keysetkey, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keysetkey, test_override=uid_pods Value:0xc00701b198} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keysetkey, test_override=uid_pods Value:0xc00701b268}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718049995s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keysetkey, test_override=uid_pods} value=90 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=keysetkey, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=salt, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=salt, test_override=uid_pods Value:0xc00701b3d8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=salt, test_override=uid_pods Value:0xc00701b500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718054787s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=salt, test_override=uid_pods} value=90 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=salt, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, 
instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=site, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=site, test_override=uid_pods Value:0xc00701b678} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=site, test_override=uid_pods Value:0xc00701b740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718059824s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=site, test_override=uid_pods} value=90 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.30.186:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-kxljb, store=site, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=auth, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=auth, test_override=uid_pods Value:0xc00701b8c0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=auth, test_override=uid_pods Value:0xc00701b990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718064618s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=auth, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=auth, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=client_side_keypairs, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=client_side_keypairs, test_override=uid_pods Value:0xc00701baf0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=client_side_keypairs, 
test_override=uid_pods Value:0xc00701bbc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718070594s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=client_side_keypairs, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=client_side_keypairs, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keyset, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keyset, test_override=uid_pods Value:0xc00701bd40} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keyset, test_override=uid_pods Value:0xc00701be18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718075584s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keyset, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keyset, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keysetkey, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keysetkey, test_override=uid_pods Value:0xc00701bf98} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keysetkey, test_override=uid_pods Value:0xc0044fa068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.71808252s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keysetkey, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=keysetkey, test_override=uid_pods} value=0 ]} 
{Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=salt, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=salt, test_override=uid_pods Value:0xc0044fa1c8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=salt, test_override=uid_pods Value:0xc0044fa2a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718087835s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=salt, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=salt, test_override=uid_pods} value=0 ]} {Instance:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=site, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=site, test_override=uid_pods Value:0xc0044fa838} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=site, test_override=uid_pods Value:0xc0044fa900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718092958s EvaluationString:[ var='QUERY' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=site, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-operator, cluster=euid-prod, container=euid-operator, env=prod, instance=10.212.31.51:9080, job=uid_pods, ns=operator-prod, pod=euid-operator-prod-75b65c6b7b-dqc4w, store=site, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=operators, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=operators, test_override=uid_pods Value:0xc0044fab80} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=operators, 
test_override=uid_pods Value:0xc0044faa80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718098383s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=operators, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=operators, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=partners, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=partners, test_override=uid_pods Value:0xc0044facf0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=partners, test_override=uid_pods Value:0xc0044fadc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718104246s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=partners, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.30.81:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-1, store=partners, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=operators, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=operators, test_override=uid_pods Value:0xc0044fb050} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=operators, test_override=uid_pods Value:0xc0044faf88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718109823s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=operators, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=operators, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=partners, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY 
Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=partners, test_override=uid_pods Value:0xc0044fb2a0} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=partners, test_override=uid_pods Value:0xc0044fb1b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718116055s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=partners, test_override=uid_pods} value=89.4915254237288 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.205:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-2, store=partners, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=operators, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=operators, test_override=uid_pods Value:0xc0044fb470} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=operators, test_override=uid_pods Value:0xc0044fb550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718121207s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=operators, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=operators, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=partners, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=partners, test_override=uid_pods Value:0xc0044fb6b8} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=partners, test_override=uid_pods Value:0xc0044fb790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718125895s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=euid-prod, container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=partners, test_override=uid_pods} value=90.50847457627118 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=euid-prod, 
container=euid-optout, env=prod, instance=10.212.31.85:9081, job=uid_pods, ns=optout-prod, pod=euid-optout-prod-0, store=partners, test_override=uid_pods} value=0 ]}]" duration=12.021554ms
+ level=debug ts=2024-05-29T13:44:14.721491916Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.721441954Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=info ts=2024-05-29T13:44:14.721257607Z caller=remote_alert_sender.go:94 user=174675 slug=journalprod host=journalprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.1.144:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e37709bc-3653-4cac-8930-3d7bf4db004c alerts=1
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.72129868Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.721195143Z caller=manager.go:153 user=552534 slug=dragse msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.71569729Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.721056041Z caller=manager.go:153 user=549935 slug=rockethems msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334500laio1eastus2, cloud_platform=Azure, customer_id=A249, env_id=334500, env_name=A249 VS Services Prod 2, env_type=prod, instance=env-334500laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.720992399Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.720927139Z caller=manager.go:153 user=678966 slug=dpuzik58 msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.720863299Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.720851861Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.72085572Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334490laio1eus2, cloud_platform=AWS, customer_id=C788, env_id=334490, env_name=c788_Tendam_prod, env_type=prod, instance=env-334490laio1eus2, job=integrations/node_exporter, region=eu-south-2, stage=testing" t=2024-05-29T13:44:14.720643627Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.720511733Z caller=manager.go:153 user=941160 slug=uateu msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.720380041Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=43.351575ms
+ logger=ngalert.state.manager.persist user=18335 slug=semaphore t=2024-05-29T13:44:14.720390914Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=18335 slug=semaphore instance= t=2024-05-29T13:44:14.720351908Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=18335 slug=semaphore t=2024-05-29T13:44:14.720318509Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334477laio1use1, cloud_platform=AWS, customer_id=C032, env_id=334477, env_name=C032 US AAP Test Env, env_type=test, instance=env-334477laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.720249636Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334477laio1use1, cloud_platform=AWS, customer_id=C032, env_id=334477, env_name=C032 US AAP Test Env, env_type=test, instance=env-334477laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.720230997Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.719835069Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334466laio1eastus2, cloud_platform=Azure, customer_id=A249, env_id=334466, env_name=A249 VS Services Dev 1, env_type=dev, instance=env-334466laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.719533656Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.719516217Z caller=manager.go:153 user=739395 slug=cminformatik msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.719408426Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.719241349Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:14.719327999Z level=debug msg="Changing state" previous_state=Pending next_state=Normal previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:44:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334465laio2eastus2, cloud_platform=Azure, customer_id=A249, env_id=334465, env_name=A249 VS Services Prod 1, env_type=prod, instance=env-334465laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.719284402Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=173374 slug=felmo version=145 fingerprint=97db3fcbc2442920 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.719161439Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.718794552s EvaluationString:}]" duration=128.156737ms
+ level=debug ts=2024-05-29T13:44:14.7191317Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.719114011Z caller=manager.go:153 user=672534 slug=lely msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.718878584Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334461laio1euw1, cloud_platform=AWS, customer_id=C033, env_id=334461, env_name=C033 EU AAP Test Env, env_type=dev, instance=env-334461laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=testing" t=2024-05-29T13:44:14.718894108Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334461laio1euw1, cloud_platform=AWS, customer_id=C033, env_id=334461, env_name=C033 EU AAP Test Env, env_type=dev, instance=env-334461laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=testing" t=2024-05-29T13:44:14.718874629Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.718831807Z caller=manager.go:153 user=493344 slug=axhmonitor msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.71853301Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:14.718516445Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:14.718507205Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.718431076Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114286 slug=enverus instance="datasource_uid=lzFWNTdGk, ref_id=A" t=2024-05-29T13:44:14.718454419Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.718410309Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.083076ms
+ level=debug ts=2024-05-29T13:44:14.718385581Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114286 slug=enverus instance="datasource_uid=lzFWNTdGk, ref_id=A" t=2024-05-29T13:44:14.718419433Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.718385Z caller=manager.go:153 user=787583 slug=mlone msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334436laio2use1, cloud_platform=AWS, customer_id=C439, env_id=334436, env_name=C439 BBU PROD Parallel, env_type=prod, instance=env-334436laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.718260792Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.718185094Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.717985481Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.717936593Z caller=manager.go:153 user=635135 slug=geaiothrtdev msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.71789916Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.71775029Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=532654 slug=chathamdirectint t=2024-05-29T13:44:14.717372655Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.236847ms
+ level=debug ts=2024-05-29T13:44:14.717386285Z caller=manager.go:153 user=733256 slug=dentsu msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:14.717282146Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334409laio1use1, cloud_platform=AWS, customer_id=C794, env_id=334409, env_name=C794 Amica AWS Prod, env_type=prod, instance=env-334409laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.71729307Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.717085596Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.71703028Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.624068ms
+ level=debug ts=2024-05-29T13:44:14.716884877Z caller=manager.go:153 user=520381 slug=brofamily msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.716790276Z caller=manager.go:153 user=694445 slug=sofistik1 msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.716571672Z caller=manager.go:153 user=522591 slug=royalbamgroup msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.716287542Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=447897 slug=mysten t=2024-05-29T13:44:14.716291794Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=526847 slug=soniclabs t=2024-05-29T13:44:14.716244544Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=59.575059ms
+ logger=ngalert.state.manager user=447897 slug=mysten instance="container=redis" t=2024-05-29T13:44:14.716279234Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.716186853Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=447897 slug=mysten instance="container=redis" t=2024-05-29T13:44:14.7162683Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=447897 slug=mysten instance="container=orbr-backend-prod-production" t=2024-05-29T13:44:14.716250888Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=447897 slug=mysten instance="container=orbr-backend-prod-kms-sidecar-production" t=2024-05-29T13:44:14.716214517Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=447897 slug=mysten version=4 fingerprint=e1accacb621304a4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.716058623Z level=debug msg="Alert rule evaluated" results="[{Instance:container=orbr-backend-prod-kms-sidecar-production State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=orbr-backend-prod-kms-sidecar-production Value:0xc06a0bf570} C:{Var:C Labels:container=orbr-backend-prod-kms-sidecar-production Value:0xc06a0bf578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715656793s EvaluationString:[ var='A' labels={container=orbr-backend-prod-kms-sidecar-production} value=2.531021447942501e-06 ], [ var='C' labels={container=orbr-backend-prod-kms-sidecar-production} value=0 ]} {Instance:container=orbr-backend-prod-production State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=orbr-backend-prod-production Value:0xc06a0bf5b0} C:{Var:C Labels:container=orbr-backend-prod-production Value:0xc06a0bf5b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715672561s EvaluationString:[ var='A' labels={container=orbr-backend-prod-production} value=0.0003810066388401132 ], [ var='C' labels={container=orbr-backend-prod-production} value=0 ]} {Instance:container=redis State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:container=redis Value:0xc06a0bf660} C:{Var:C Labels:container=redis Value:0xc06a0bf608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.715679635s EvaluationString:[ var='A' labels={container=redis} value=0.0009131848337069375 ], [ var='C' labels={container=redis} value=0 ]}]" duration=186.908972ms
+ level=debug ts=2024-05-29T13:44:14.716114565Z caller=manager.go:153 user=636875 slug=kaisbaccour msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334340laio2canadacentral, cloud_platform=Azure, customer_id=A246, env_id=334340, env_name=A246 Longo Brothers PROD, env_type=prod, instance=env-334340laio2canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=preprod" t=2024-05-29T13:44:14.71614246Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.716010663Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.715902535Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.715833061Z caller=manager.go:153 user=525810 slug=gategourmet msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334338laio1eastus, cloud_platform=Azure, customer_id=A244, env_id=334338, env_name=a244_Gilbane_DEV, env_type=dev, instance=env-334338laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.71584378Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.715419555Z caller=manager.go:153 user=543604 slug=kingmakers msg="rules have not changed, skipping rule manager update"
+ level=info ts=2024-05-29T13:44:14.715707039Z caller=remote_image_capturer.go:61 user=70430 slug=dapperlabs rule_org_id=1 rule_uid=c5c057a4-29ab-4c54-84da-322d1ef18b97 dashboard=zoFL90UFfRzI panel=13 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+ level=debug ts=2024-05-29T13:44:14.715487184Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334336laio1eastus, cloud_platform=Azure, customer_id=A244, env_id=334336, env_name=a244_Gilbane_PROD, env_type=prod, instance=env-334336laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.715432647Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.715386008Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=482163 slug=pmgprod instance="ecs_cluster=pets4homes-production" t=2024-05-29T13:44:14.71521605Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=482163 slug=pmgprod t=2024-05-29T13:44:14.715146618Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334314laio2use1, cloud_platform=AWS, customer_id=C793, env_id=334314, env_name=C793 Meredith Prod, env_type=prod, instance=env-334314laio2use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.715137814Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.71512615Z caller=manager.go:153 user=657772 slug=axpoprod msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334314laio1use1, cloud_platform=AWS, customer_id=C793, env_id=334314, env_name=C793 Meredith Prod, env_type=prod, instance=env-334314laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.714975334Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334314laio1use1, cloud_platform=AWS, customer_id=C793, env_id=334314, env_name=C793 Meredith Prod, env_type=prod, instance=env-334314laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.714962285Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.714840646Z caller=manager.go:153 user=560336 slug=powernet msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.714552041Z caller=manager.go:153 user=505053 slug=luydev msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.714333522Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=46.486309ms
+ level=debug ts=2024-05-29T13:44:14.71425983Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334242laio1use1, cloud_platform=AWS, customer_id=C674, env_id=334242, env_name=C674 Amica Dev, env_type=dev, instance=env-334242laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.71418363Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.713984798Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.713923532Z caller=manager.go:153 user=539659 slug=voltgoed msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.713734539Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.713545352Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.713446569Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334147laio1eastus, cloud_platform=Azure, customer_id=A243, env_id=334147, env_name=A243 David's Bridal Dev, env_type=dev, instance=env-334147laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.713566827Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-334147laio1eastus, cloud_platform=Azure, customer_id=A243, env_id=334147, env_name=A243 David's Bridal Dev, env_type=dev, instance=env-334147laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.713551964Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=697570 slug=carroteco t=2024-05-29T13:44:14.713028561Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.971165ms
+ ts=2024-05-29T13:44:14.712951333Z caller=memberlist_logger.go:74 level=debug msg="Stream connection from=10.18.68.30:48182"
+ level=debug ts=2024-05-29T13:44:14.712840426Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333956laio1usw1, cloud_platform=AWS, customer_id=C585, env_id=333956, env_name=C585 Guess QA, env_type=testing, instance=env-333956laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.712840157Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.712007965Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.711847361Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.712585111Z caller=manager.go:153 user=355705 slug=fiftycsp msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.712383906Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.712364178Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.712235201Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.712114895Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.712065103Z caller=manager.go:153 user=486194 slug=nievalfs msg="rules have not changed, skipping rule manager update"
+ level=debug ts=2024-05-29T13:44:14.711747766Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.711642927Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.711473274Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.711404593Z caller=manager.go:153 user=487336 slug=plaksivayatryapka msg="rules have not changed, skipping rule manager update"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333396laio1use1, cloud_platform=AWS, customer_id=C462, env_id=333396, env_name=C462 UAT Parallel, env_type=qa, instance=env-333396laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.71130841Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.71127051Z caller=remote_instance_store.go:51 user=412141 slug=sharethrough msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333396laio1use1, cloud_platform=AWS, customer_id=C462, env_id=333396, env_name=C462 UAT Parallel, env_type=qa, instance=env-333396laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.711299129Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:14.711218154Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:14.711200068Z level=debug msg="Setting next state" handler=resultError
+ level=debug ts=2024-05-29T13:44:14.711171328Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333392laio1use1, cloud_platform=AWS, customer_id=C462, env_id=333392, env_name=C462 FED UAT Parallel, env_type=qa, instance=env-333392laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.711091234Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.711007872Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.710900328Z caller=grafana.go:247 user=90424 slug=westerveltlumber msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=error" groups=142 alerts=0
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333270laio1use1, cloud_platform=AWS, customer_id=C785, env_id=333270, env_name=C785 Freddie Mac Prod, env_type=prod, instance=env-333270laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.710971206Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-03, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1,
ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy" t=2024-05-29T13:44:14.710947433Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.710877256Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333169laio1eastus2, cloud_platform=Azure, customer_id=A240, env_id=333169, env_name=A240 TBD, env_type=prod, instance=env-333169laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.71084624Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333169laio1eastus2, cloud_platform=Azure, customer_id=A240, env_id=333169, env_name=A240 TBD, env_type=prod, instance=env-333169laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.7108394Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-01, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=LEADER, ready=true, url=http://127.0.0.1:8101/healthy" t=2024-05-29T13:44:14.710805602Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.710693008Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=538037 slug=drivewealth version=67 fingerprint=d2e1ec83b8b874a5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.710471601Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-01, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=LEADER, ready=true, url=http://127.0.0.1:8101/healthy State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-01, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=LEADER, ready=true, url=http://127.0.0.1:8101/healthy Value:0xc0404c7b60} C:{Var:C Labels:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-01, live=true, location=NY4-PQT-8162, 
ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=LEADER, ready=true, url=http://127.0.0.1:8101/healthy Value:0xc00f32e130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.709849891s EvaluationString:[ var='B' labels={__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-01, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=LEADER, ready=true, url=http://127.0.0.1:8101/healthy} value=0 ], [ var='C' labels={__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-01, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=LEADER, ready=true, url=http://127.0.0.1:8101/healthy} value=0 ]} {Instance:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-02, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-02, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy Value:0xc00f32e640} C:{Var:C Labels:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-02, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy Value:0xc00f32e7d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.709870783s EvaluationString:[ var='B' labels={__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-02, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy} value=0 ], [ var='C' 
labels={__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-02, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy} value=0 ]} {Instance:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-03, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-03, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy Value:0xc00f32ede0} C:{Var:C Labels:__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-03, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy Value:0xc00f32f030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.709881753s EvaluationString:[ var='B' labels={__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-03, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy} value=0 ], [ var='C' labels={__name__=ioms1_node_state_health:anomalous, application=ioms1p_health, business=institutional, data_type=application, db=telegraf, electionState=CLOSED, eligibleForLeader=true, healthy=true, host=ny4ap-uoms-03, live=true, location=NY4-PQT-8162, ml_algorithm=grafana_prophet_1_0_1, ml_forecast=anomalies, ml_job_id=f2aecc68-d434-43e3-9174-b80846d08107, ml_job_metric=ioms1_node_state_health, moduleState=ACTIVE, nodeRole=FOLLOWER, ready=true, url=http://127.0.0.1:8101/healthy} value=0 ]}]" duration=129.525188ms + level=debug ts=2024-05-29T13:44:14.710578981Z caller=manager.go:153 user=721351 slug=rtdmsmonitor msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:14.710370392Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.710393404Z 
caller=remote_instance_store.go:51 user=274199 slug=telemetriahgm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333077laio1euw1, cloud_platform=AWS, customer_id=C472, env_id=333077, env_name=C472_PROD, env_type=prod, instance=env-333077laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.710349642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-333077laio1euw1, cloud_platform=AWS, customer_id=C472, env_id=333077, env_name=C472_PROD, env_type=prod, instance=env-333077laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.710338826Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=274199 slug=telemetriahgm t=2024-05-29T13:44:14.7102381Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332946laio1eastus, cloud_platform=Azure, customer_id=A236, env_id=332946, env_name=a236_Eagle, env_type=prod, instance=env-332946laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.710080593Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.70991917Z caller=manager.go:153 user=543767 slug=queergroup msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:14.709734068Z caller=manager.go:153 user=563357 slug=asew msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332805laio1centralus, cloud_platform=Azure, customer_id=A239, env_id=332805, env_name=a239_partner_prod_Dr, env_type=prod, instance=env-332805laio1centralus, job=integrations/node_exporter, region=centralus, stage=preprod" t=2024-05-29T13:44:14.709707083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332713laio2apn1, cloud_platform=AWS, customer_id=C679, env_id=332713, env_name=C679_Parallel_Prod, env_type=prod, instance=env-332713laio2apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.709604174Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.709561765Z caller=manager.go:153 user=498048 slug=sys3 msg="rules have not changed, skipping rule manager update" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332713laio1apn1, cloud_platform=AWS, customer_id=C679, env_id=332713, env_name=C679_Parallel_Prod, env_type=prod, instance=env-332713laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.70942072Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.709381562Z caller=manager.go:153 user=719587 slug=techtrail msg="rules have not changed, skipping rule manager update" + level=debug ts=2024-05-29T13:44:14.709366285Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + 
level=debug ts=2024-05-29T13:44:14.709283931Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=BETA-DATA-FIXES-STATUS-UPDATES-SQS" t=2024-05-29T13:44:14.709277888Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=BETA-DATA-FIXES-STATUS-UPDATES-SQS" t=2024-05-29T13:44:14.709265518Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.709190072Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708708389Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708875537Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708820258Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332430laio1centralus, cloud_platform=Azure, customer_id=A239, env_id=332430, env_name=A239 BF InternalAnalytics, env_type=prod, instance=env-332430laio1centralus, job=integrations/node_exporter, region=centralus, stage=preprod" t=2024-05-29T13:44:14.708703169Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.708602477Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708564452Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708613745Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708504252Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.708526433Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.708504549Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708461481Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708525649Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=668587 slug=brightacceptance group="GMS - Storing communicatie sensormodule" + level=debug ts=2024-05-29T13:44:14.708518449Z caller=ruler.go:606 msg="rule group owned" user=668587 slug=brightacceptance group="Ahold - Laadpaal - PADC011" + level=debug ts=2024-05-29T13:44:14.708499349Z caller=ruler.go:606 msg="rule group owned" user=668587 slug=brightacceptance group="GMS - Storing NI" + level=debug ts=2024-05-29T13:44:14.708447048Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.708473848Z caller=ruler.go:606 msg="rule group owned" user=668587 slug=brightacceptance group="Status - Laadpaal - ZDDC-003 -OP-1B" + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.70841955Z level=debug msg="Setting next state" handler=resultError + 
level=debug ts=2024-05-29T13:44:14.708455848Z caller=ruler.go:606 msg="rule group owned" user=668587 slug=brightacceptance group="GMS - TL Sensor Critical Threshold" + level=debug ts=2024-05-29T13:44:14.708373547Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=668587 slug=brightacceptance group="GMS - Storing Wegdekgeleidbaarheidsensor ongeldig buiten strooiseizoen" + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=469869ace9cc04cc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.708351585Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.738039ms + level=debug ts=2024-05-29T13:44:14.708330846Z caller=ruler.go:606 msg="rule group owned" user=668587 slug=brightacceptance group="Status Laadpaal - ZDDC-007 -OP-3B" + level=error ts=2024-05-29T13:44:14.708298289Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + level=debug ts=2024-05-29T13:44:14.708177544Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=668587 slug=brightacceptance group="Test Alerts BIC" + level=debug ts=2024-05-29T13:44:14.708238445Z caller=ruler.go:606 msg="rule group owned" user=355705 slug=fiftycsp group=VM + level=debug ts=2024-05-29T13:44:14.708209644Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=355705 slug=fiftycsp group=SN-FiftyOne + level=debug ts=2024-05-29T13:44:14.708192144Z caller=ruler.go:606 msg="rule group owned" user=355705 slug=fiftycsp group=FG-FiftyOne + level=debug ts=2024-05-29T13:44:14.708175844Z caller=ruler.go:606 msg="rule group owned" user=355705 slug=fiftycsp group=SN-Libra + level=debug ts=2024-05-29T13:44:14.708169244Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=355705 slug=fiftycsp group="Nordic Libra" + level=debug ts=2024-05-29T13:44:14.708125943Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=668587 slug=brightacceptance group="GMS - TW Sensor Moderate Threshold" + level=debug ts=2024-05-29T13:44:14.708025141Z caller=ruler.go:606 msg="rule group owned" user=355705 slug=fiftycsp group=SVK-Libra + level=debug ts=2024-05-29T13:44:14.708023541Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=668587 slug=brightacceptance group="GMS - Storing RV" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332061laiouse1, cloud_platform=AWS, customer_id=C736, env_id=332061, env_name=C736 EDU_Zagreb, env_type=prod, instance=env-332061laiouse1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:14.708119923Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.70790484Z caller=ruler.go:606 msg="rule group owned" user=443783 slug=gt123test group=ds_fail + level=debug ts=2024-05-29T13:44:14.707659636Z caller=ruler.go:606 msg="rule group owned" user=443783 slug=gt123test group=test_alert_anno + level=debug ts=2024-05-29T13:44:14.707881939Z caller=ruler.go:606 msg="rule group owned" user=639928 slug=skatteetaten group=MimirMemoryMapAreasTooHigh + level=debug ts=2024-05-29T13:44:14.707865039Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=639928 slug=skatteetaten group=MimirInconsistentRuntimeConfig + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.707979976Z level=debug msg="State manager processing evaluation results" 
resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332005laio1apn1, cloud_platform=AWS, customer_id=C679, env_id=332005, env_name=C679_Parallel_Dev, env_type=dev, instance=env-332005laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.707939886Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.707788738Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=672534 slug=lely group=5min-eval-group + level=debug ts=2024-05-29T13:44:14.706772722Z caller=ruler.go:606 msg="rule group owned" user=672534 slug=lely group=sphere + level=debug ts=2024-05-29T13:44:14.707685736Z caller=ruler.go:606 msg="rule group owned" user=635135 slug=geaiothrtdev group="System CPU Usage" + level=debug ts=2024-05-29T13:44:14.707746237Z caller=ruler.go:606 msg="rule group owned" user=536612 slug=energyworx group=ewx-edp-dso + level=debug ts=2024-05-29T13:44:14.707683036Z caller=ruler.go:606 msg="rule group owned" user=729664 slug=airbusatlanticdev group=Test_5_min + level=debug ts=2024-05-29T13:44:14.707668836Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=729664 slug=airbusatlanticdev group=Azure_Alert + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-332002laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=332002, env_name=A164_CTTI_PARALLEL_DEV, env_type=dev, instance=env-332002laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=testing" t=2024-05-29T13:44:14.707778223Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.707644936Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=501304 slug=dotnetsocial group=default-4h + level=debug ts=2024-05-29T13:44:14.707633435Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=443783 slug=gt123test group=fail + level=debug ts=2024-05-29T13:44:14.707761752Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.707621035Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=787583 slug=mlone group="Ig_Id=\"Pre API App\"" + level=debug ts=2024-05-29T13:44:14.707614035Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=787583 slug=mlone group="Ig_Id=\"Nginx-server-pre-web\"" + level=debug ts=2024-05-29T13:44:14.707604235Z caller=ruler.go:606 msg="rule group owned" user=443783 slug=gt123test group=graf_rule + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331956laio2use2, cloud_platform=AWS, customer_id=C756, env_id=331956, env_name=C756 Centene Prod DR, env_type=prod, instance=env-331956laio2use2, job=integrations/node_exporter, region=us-east-2, stage=preprod" t=2024-05-29T13:44:14.707682699Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.707661089Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.707578535Z caller=ruler.go:606 msg="rule group owned" user=706105 slug=axiansikvs group=PRDO-CH + level=debug ts=2024-05-29T13:44:14.707570734Z caller=ruler.go:606 msg="rule group owned" user=739395 slug=cminformatik group="CMI - Alerting - Logs" + level=debug 
ts=2024-05-29T13:44:14.707566129Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331956laio1use2, cloud_platform=AWS, customer_id=C756, env_id=331956, env_name=C756 Centene Prod DR, env_type=prod, instance=env-331956laio1use2, job=integrations/node_exporter, region=us-east-2, stage=preprod" t=2024-05-29T13:44:14.707567521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331956laio1use2, cloud_platform=AWS, customer_id=C756, env_id=331956, env_name=C756 Centene Prod DR, env_type=prod, instance=env-331956laio1use2, job=integrations/node_exporter, region=us-east-2, stage=preprod" t=2024-05-29T13:44:14.707553826Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.707498933Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=542055 slug=bangolufsen group=DevopsStuff + level=debug ts=2024-05-29T13:44:14.707488733Z caller=ruler.go:606 msg="rule group owned" user=542055 slug=bangolufsen group=SigningTool + level=debug ts=2024-05-29T13:44:14.707454333Z caller=ruler.go:606 msg="rule group owned" user=639839 slug=silae group=CPU + level=debug ts=2024-05-29T13:44:14.707414632Z caller=ruler.go:606 msg="rule group owned" user=733256 slug=dentsu group=DR_eval + level=debug ts=2024-05-29T13:44:14.707397032Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=787583 slug=mlone group="Ig_Id=\"pro-web-app-cluster\"" + level=debug ts=2024-05-29T13:44:14.707390132Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=521042 slug=unibussgiro group="Services Windows" + level=debug ts=2024-05-29T13:44:14.707355531Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=507602 slug=meronz group=Loki + level=debug ts=2024-05-29T13:44:14.707342231Z caller=ruler.go:606 msg="rule group owned" user=703790 slug=luisbonet group=kubernetes-resources + level=debug ts=2024-05-29T13:44:14.70729633Z caller=ruler.go:606 msg="rule group owned" user=552529 slug=hassring group=hass + level=debug ts=2024-05-29T13:44:14.70728383Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=686115 slug=pltsea group="Platform Engineering" + level=debug ts=2024-05-29T13:44:14.70726053Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=501331 slug=thenks group=node-exporter + level=debug ts=2024-05-29T13:44:14.70726423Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=522490 slug=cur group="Azure Alerts" + level=debug ts=2024-05-29T13:44:14.707230769Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.707184029Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=666056 slug=kreicer group=alerts + level=debug ts=2024-05-29T13:44:14.707160028Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=485797 slug=gconnect group="Once every 10m" + level=debug ts=2024-05-29T13:44:14.707135528Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=485797 slug=gconnect group="Daily Evaluation" + level=debug ts=2024-05-29T13:44:14.707080727Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=501258 slug=thriving group=logs + level=debug ts=2024-05-29T13:44:14.707066727Z caller=ruler.go:606 msg="rule group owned" user=635606 
slug=royalbamgroupa group="SPIE 10 min alerts" + level=debug ts=2024-05-29T13:44:14.707050827Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=732089 slug=job2b group="Job2b Api Exception" + level=debug ts=2024-05-29T13:44:14.707047126Z caller=ruler.go:606 msg="rule group owned" user=740293 slug=sopamo group="5 minutes" + level=debug ts=2024-05-29T13:44:14.707023026Z caller=ruler.go:606 msg="rule group owned" user=557927 slug=axpodev group=Evaluate + level=debug ts=2024-05-29T13:44:14.707013526Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=557927 slug=axpodev group=Evalute + level=debug ts=2024-05-29T13:44:14.706964425Z caller=ruler.go:606 msg="rule group owned" user=557927 slug=axpodev group="Server Health" + level=debug ts=2024-05-29T13:44:14.706954825Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=557927 slug=axpodev group=ML + level=debug ts=2024-05-29T13:44:14.706936225Z caller=ruler.go:606 msg="rule group owned" user=623958 slug=penbevdev group=IntegreationReport + level=debug ts=2024-05-29T13:44:14.706802923Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=623958 slug=penbevdev group=Timeouts + level=debug ts=2024-05-29T13:44:14.706871924Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=639928 slug=skatteetaten group=MimirIngesterReachingTenantsLimit + level=debug ts=2024-05-29T13:44:14.706858424Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=639928 slug=skatteetaten group=MimirIngesterUnhealthy + level=debug ts=2024-05-29T13:44:14.706799623Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=843304 slug=ppcgroup group="Real Time - Sap" + level=debug ts=2024-05-29T13:44:14.706753422Z caller=ruler.go:606 msg="rule group owned" user=694196 slug=luisjavierhorcajada group=Cloud + level=debug ts=2024-05-29T13:44:14.706695621Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=635135 slug=geaiothrtdev group="System Memory" + level=debug ts=2024-05-29T13:44:14.70661692Z caller=ruler.go:606 msg="rule group owned" user=855928 slug=graftestcos group=Probe_status + level=debug ts=2024-05-29T13:44:14.705717006Z caller=ruler.go:606 msg="rule group owned" user=635135 slug=geaiothrtdev group="Disk - Root" + level=debug ts=2024-05-29T13:44:14.706577519Z caller=ruler.go:606 msg="rule group owned" user=732381 slug=l337direct group="Google Search enrichers" + level=debug ts=2024-05-29T13:44:14.706541819Z caller=ruler.go:606 msg="rule group owned" user=549802 slug=neax group="billing warning" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331724laio1euw1, cloud_platform=AWS, customer_id=C613, env_id=331724, env_name=C613_DEV_2021U12, env_type=dev, instance=env-331724laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.706535872Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.706397917Z caller=ruler.go:606 msg="rule group owned" user=941160 slug=uateu group="Call2Teams API" + level=debug ts=2024-05-29T13:44:14.706451117Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=738591 slug=fermax group=Fermax-24h + level=debug ts=2024-05-29T13:44:14.706351916Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=541444 slug=coreup group="need to learn" + level=debug ts=2024-05-29T13:44:14.706344416Z caller=ruler.go:606 msg="rule group owned" user=537600 slug=oneytrustpoc group=Check_every_hour + level=debug 
ts=2024-05-29T13:44:14.706328315Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=754297 slug=exelcia group=test-evaluation-group + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331714laio1usw2, cloud_platform=AWS, customer_id=C772, env_id=331714, env_name=C772 Boyd DEV, env_type=dev, instance=env-331714laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.70635675Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.706253014Z caller=ruler.go:606 msg="rule group owned" user=757626 slug=hmgroup group=K8s + level=debug ts=2024-05-29T13:44:14.706219714Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=757626 slug=hmgroup group=Weekday + level=debug ts=2024-05-29T13:44:14.706099212Z caller=ruler.go:606 msg="rule group owned" user=757626 slug=hmgroup group="Expansion Pack" + level=debug ts=2024-05-29T13:44:14.706188913Z caller=ruler.go:606 msg="rule group owned" user=668587 slug=brightacceptance group="GMS - Storing Wegdekgeleidbaarheidsensor ongeldig regen" + level=debug ts=2024-05-29T13:44:14.706168913Z caller=ruler.go:606 msg="rule group owned" user=668587 slug=brightacceptance group="GMS - Storing communicatie meetstation" + level=debug ts=2024-05-29T13:44:14.706131612Z caller=ruler.go:606 msg="rule group owned" user=711423 slug=kapoko group=Hourly + level=debug ts=2024-05-29T13:44:14.703798577Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=609063 slug=geasft group=Alerts + level=debug ts=2024-05-29T13:44:14.706108212Z caller=ruler.go:606 msg="rule group owned" user=436902 slug=allianz1 group=coreforms + level=debug ts=2024-05-29T13:44:14.706097112Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=436902 slug=allianz1 group=synthetic_prod_instant + level=debug ts=2024-05-29T13:44:14.706063238Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.706084712Z caller=ruler.go:606 msg="rule group owned" user=757626 slug=hmgroup group="CPU utilization" + level=debug ts=2024-05-29T13:44:14.706077712Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=757626 slug=hmgroup group=e2e + level=debug ts=2024-05-29T13:44:14.706069211Z caller=ruler.go:606 msg="rule group owned" user=757626 slug=hmgroup group="Jarvis and CloudMft" + level=debug ts=2024-05-29T13:44:14.706037437Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.706065311Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=639083 slug=ucaremedical group=EvaluateAlarms + level=debug ts=2024-05-29T13:44:14.706050311Z caller=ruler.go:606 msg="rule group owned" user=639083 slug=ucaremedical group=ContainerAbsent + level=debug ts=2024-05-29T13:44:14.706005511Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=657772 slug=axpoprod group=Status + level=debug ts=2024-05-29T13:44:14.70598251Z caller=ruler.go:606 msg="rule group owned" user=657772 slug=axpoprod group=Alert_Logs + level=debug ts=2024-05-29T13:44:14.70595721Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=672408 slug=orionprod group=Api + level=debug ts=2024-05-29T13:44:14.705835208Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=672408 slug=orionprod group=Edge + logger=ngalert.state.manager user=412779 slug=microstrategy 
instance="__name__=mstr_status_modeling_service, agent_hostname=env-331542laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=331542, env_name=A234 BF Partner DEV, env_type=dev, instance=env-331542laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.705978955Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.705873908Z caller=ruler.go:606 msg="rule group owned" user=636875 slug=kaisbaccour group=carrot-app + level=debug ts=2024-05-29T13:44:14.705827108Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=493344 slug=axhmonitor group="Web App" + level=debug ts=2024-05-29T13:44:14.705819608Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=619260 slug=mildblue group=Usage + level=debug ts=2024-05-29T13:44:14.705806307Z caller=ruler.go:606 msg="rule group owned" user=619260 slug=mildblue group=Resources + level=debug ts=2024-05-29T13:44:14.705795207Z caller=ruler.go:606 msg="rule group owned" user=619260 slug=mildblue group=Maintenance + level=debug ts=2024-05-29T13:44:14.705673905Z caller=ruler.go:606 msg="rule group owned" user=554491 slug=safeskyindustries group=Logs + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331501laio1use1, cloud_platform=AWS, customer_id=C462, env_id=331501, env_name=C462 FED DEV Parallel, env_type=dev, instance=env-331501laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.705826181Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331501laio1use1, cloud_platform=AWS, customer_id=C462, env_id=331501, env_name=C462 FED DEV Parallel, env_type=dev, instance=env-331501laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.705810057Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.705627905Z caller=ruler.go:606 msg="rule group owned" user=685144 slug=wsaudiologytest group="Demo Alert" + level=debug ts=2024-05-29T13:44:14.705547703Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=572672 slug=drath group=default + level=debug ts=2024-05-29T13:44:14.705507603Z caller=ruler.go:606 msg="rule group owned" user=919528 slug=bawagdev2 group=KONDOR + level=debug ts=2024-05-29T13:44:14.704586989Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=867755 slug=xgenpreprod group="Daily cost check" + level=debug ts=2024-05-29T13:44:14.705470602Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=522591 slug=royalbamgroup group="SPIE 1m alerts" + logger=ngalert.state.manager.persist user=662363 slug=facephi t=2024-05-29T13:44:14.705617178Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=662363 slug=facephi instance="datasource_uid=saa-eks-dpad-sae-1, ref_id=A" t=2024-05-29T13:44:14.705604968Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.705436202Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=713091 slug=expghohr group="Host Memory is under utilized" + logger=ngalert.state.manager user=662363 slug=facephi instance="datasource_uid=saa-eks-dpad-sae-1, ref_id=A" t=2024-05-29T13:44:14.705577227Z level=debug msg="Setting next state" handler=resultNoData + level=debug 
ts=2024-05-29T13:44:14.705556641Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=662363 slug=facephi t=2024-05-29T13:44:14.705557057Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331138laio1usw1, cloud_platform=AWS, customer_id=C537, env_id=331138, env_name=C537_PacSun_DEV_Parallel, env_type=dev, instance=env-331138laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=testing" t=2024-05-29T13:44:14.705481529Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=662363 slug=facephi version=3 fingerprint=da2ebcfddefe6ba2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.705496515Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=saa-eks-dpad-sae-1, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.705172128s EvaluationString:}]" duration=208.024489ms + level=debug ts=2024-05-29T13:44:14.705463557Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=148654 slug=tinybeans t=2024-05-29T13:44:14.705480089Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.705479501Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.705469272Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196413 slug=form3production instance="Region=-, ServiceLimit=Instance profiles, ServiceName=IAM" t=2024-05-29T13:44:14.705379023Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.705379401Z caller=ruler.go:606 msg="rule group owned" user=543604 slug=kingmakers group="SSB CMS" + level=debug ts=2024-05-29T13:44:14.705354401Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=603188 slug=cherrychain group="DAO log" + level=debug ts=2024-05-29T13:44:14.7053475Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=543604 slug=kingmakers group="node exporter" + level=debug ts=2024-05-29T13:44:14.705168998Z caller=ruler.go:606 msg="rule group owned" user=603188 slug=cherrychain group="Service Alert" + level=debug ts=2024-05-29T13:44:14.7053388Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=543604 slug=kingmakers group=kubernetes-resources + logger=ngalert.state.manager user=788474 slug=elisasre instance="cluster=sre-ci.k8s.local, component=artemis-test, instance=https://10.222.157.182, monitor=monitor-486, namespace=health, region=sdcv3, target=https://10.222.157.182" t=2024-05-29T13:44:14.705365902Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.7053232Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=543604 slug=kingmakers group=Compliance + level=debug ts=2024-05-29T13:44:14.705283299Z caller=ruler.go:606 msg="rule group owned" user=543604 slug=kingmakers group=Engagement + level=debug ts=2024-05-29T13:44:14.705248499Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=739130 slug=redphasetech group=Faults + level=debug ts=2024-05-29T13:44:14.705238099Z caller=ruler.go:609 msg="rule group not owned, ignoring" 
user=739130 slug=redphasetech group=LiveEvaluations + level=debug ts=2024-05-29T13:44:14.705217698Z caller=ruler.go:606 msg="rule group owned" user=602695 slug=ffvo group=1m + level=debug ts=2024-05-29T13:44:14.705265123Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.705193498Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=603615 slug=geadigihub group=Edge_GW_Monitoring + level=debug ts=2024-05-29T13:44:14.705171498Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=603615 slug=geadigihub group="Disk - Data" + level=debug ts=2024-05-29T13:44:14.704894693Z caller=ruler.go:606 msg="rule group owned" user=603188 slug=cherrychain group=CloudWatch + level=debug ts=2024-05-29T13:44:14.705153097Z caller=ruler.go:606 msg="rule group owned" user=603615 slug=geadigihub group="System Last Seen" + level=debug ts=2024-05-29T13:44:14.705143597Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=603615 slug=geadigihub group="Device Offline Rule" + level=debug ts=2024-05-29T13:44:14.705100397Z caller=ruler.go:606 msg="rule group owned" user=603615 slug=geadigihub group="Container RAM Usage" + level=debug ts=2024-05-29T13:44:14.705020595Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=852863 slug=liantisitprod group=general-alerts + level=debug ts=2024-05-29T13:44:14.705077596Z caller=ruler.go:606 msg="rule group owned" user=922678 slug=jetta group=1m + level=debug ts=2024-05-29T13:44:14.705063996Z caller=ruler.go:606 msg="rule group owned" user=321342 slug=aldisouth group=Nodes + logger=ngalert.scheduler user=698963 slug=lemonade version=6 fingerprint=fa1c7fdc3b15ca04 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.705026227Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels: Value:0xc010e62f60} THRESHOLD:{Var:THRESHOLD Labels: Value:0xc010e62f68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.704699218s EvaluationString:[ var='QUERY' labels={} value=0 ], [ var='THRESHOLD' labels={} value=0 ]}]" duration=31.731918ms + level=debug ts=2024-05-29T13:44:14.705045696Z caller=ruler.go:606 msg="rule group owned" user=321342 slug=aldisouth group=ArgoCD + level=debug ts=2024-05-29T13:44:14.704999495Z caller=ruler.go:606 msg="rule group owned" user=852863 slug=liantisitprod group=github-alerts + level=debug ts=2024-05-29T13:44:14.704991695Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=629145 slug=bricklog group=Frontend + level=debug ts=2024-05-29T13:44:14.704954194Z caller=ruler.go:606 msg="rule group owned" user=569957 slug=hgp group=URL + logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.704848921Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.704944194Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=686352 slug=peppermoney group="Every half hour" + level=debug ts=2024-05-29T13:44:14.70497654Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.704887271Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.704852493Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=707638 slug=eparagony group=1m + level=error ts=2024-05-29T13:44:14.704760015Z 
caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=99d08ca71bd868bb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.704797022Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=9.10705ms
+ level=debug ts=2024-05-29T13:44:14.704757445Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.704740617Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.704764891Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=783896 slug=dhlexpeu group=gsntest
+ level=debug ts=2024-05-29T13:44:14.704750691Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=514652 slug=phamp group="Group 1"
+ level=debug ts=2024-05-29T13:44:14.704723191Z caller=ruler.go:606 msg="rule group owned" user=539659 slug=voltgoed group=test-eval
+ level=debug ts=2024-05-29T13:44:14.70469869Z caller=ruler.go:606 msg="rule group owned" user=640569 slug=heimdallpower group="Application Gateway Unhealthy Hosts"
+ level=debug ts=2024-05-29T13:44:14.70468439Z caller=ruler.go:606 msg="rule group owned" user=640569 slug=heimdallpower group="Azure Service Bus"
+ level=debug ts=2024-05-29T13:44:14.70467489Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=560078 slug=thermogold group=billing
+ level=debug ts=2024-05-29T13:44:14.70466009Z caller=ruler.go:606 msg="rule group owned" user=658776 slug=grundium group=ocus-cloud-evalution
+ level=debug ts=2024-05-29T13:44:14.70464729Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=518247 slug=monitoringbd group=Usage
+ level=debug ts=2024-05-29T13:44:14.704614189Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=687021 slug=heviai group="DEVICE STATUS"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-331007laio1use1, cloud_platform=AWS, customer_id=C462, env_id=331007, env_name=C462 DEMO Parallel, env_type=demo, instance=env-331007laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.704530783Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.704572989Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=560336 slug=powernet group=SNMP
+ level=debug ts=2024-05-29T13:44:14.704541488Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=560336 slug=powernet group=TELE
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330929laiouse1, cloud_platform=AWS, customer_id=C780, env_id=330929, env_name=C780 Quipux PROD, env_type=prod, instance=env-330929laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.704311655Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.704311885Z caller=ruler.go:606 msg="rule group owned" user=656164 slug=conbat group=Lab
+ level=debug ts=2024-05-29T13:44:14.704232883Z caller=ruler.go:606 msg="rule group owned" user=656164 slug=conbat group=Container
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.70430763Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.704270884Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=555781 slug=danelec group=1h
+ level=debug ts=2024-05-29T13:44:14.704289762Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.704232583Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=677472 slug=sbsdash group="Test Eval Group"
+ logger=ngalert.scheduler user=679029 slug=joveoprodaws version=15239 fingerprint=441802cee5cca7c3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.704237579Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.70380682s EvaluationString:}]" duration=471.55076ms
+ logger=ngalert.state.manager.persist user=664976 slug=staging1themomproject t=2024-05-29T13:44:14.704184517Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.355863ms
+ level=debug ts=2024-05-29T13:44:14.704003324Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.704169482Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=662913 slug=gealptsfprod group="Amount of expected monitoring data not received"
+ level=debug ts=2024-05-29T13:44:14.704161282Z caller=ruler.go:606 msg="rule group owned" user=662913 slug=gealptsfprod group="Disk - Boot"
+ level=debug ts=2024-05-29T13:44:14.704143582Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=662913 slug=gealptsfprod group="Disk - Root"
+ level=debug ts=2024-05-29T13:44:14.704114882Z caller=ruler.go:606 msg="rule group owned" user=436902 slug=allianz1 group=CampaignMTACritical
+ level=debug ts=2024-05-29T13:44:14.704121079Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.704051581Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=436902 slug=allianz1 group=campaign_replication_status_check
+ level=debug ts=2024-05-29T13:44:14.704113Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703938442Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=697570 slug=carroteco version=11 fingerprint=fe69a09425b00c24 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.703913514Z level=debug msg="Alert rule evaluated" results="[{Instance:ApiName=document-external-api-211df2e State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ApiName=document-external-api-211df2e Value:0xc03ce1ab08} C:{Var:C Labels:ApiName=document-external-api-211df2e Value:0xc03ce1aaf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703479247s EvaluationString:[ var='B' labels={ApiName=document-external-api-211df2e} value=0 ], [ var='C' labels={ApiName=document-external-api-211df2e} value=0 ]}]" duration=32.865748ms
+ level=debug ts=2024-05-29T13:44:14.703889978Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=695211 slug=dhldebrieftest group="Debrief Traces Group"
+ level=debug ts=2024-05-29T13:44:14.703935512Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703893769Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703977649Z caller=remote_instance_store.go:51 user=620449 slug=pocketbitcoin msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703920846Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703912793Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703884419Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703919478Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.703842277Z caller=ruler.go:606 msg="rule group owned" user=662913 slug=gealptsfprod group="Disk - Data"
+ level=debug ts=2024-05-29T13:44:14.703836777Z caller=ruler.go:606 msg="rule group owned" user=520651 slug=robertogiacomozzi group=SecOLO24h
+ level=debug ts=2024-05-29T13:44:14.703829077Z caller=ruler.go:606 msg="rule group owned" user=522591 slug=royalbamgroup group="10min Alerts"
+ level=debug ts=2024-05-29T13:44:14.703869419Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330882laio1aps1, cloud_platform=AWS, customer_id=C782, env_id=330882, env_name=c782_smart_axiata_dev, env_type=dev, instance=env-330882laio1aps1, job=integrations/node_exporter, region=ap-southeast-1, stage=live" t=2024-05-29T13:44:14.703848029Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.703805777Z caller=ruler.go:606 msg="rule group owned" user=662913 slug=gealptsfprod group="System Memory"
+ level=debug ts=2024-05-29T13:44:14.703803577Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=520651 slug=robertogiacomozzi group="GateWay DE"
+ level=debug ts=2024-05-29T13:44:14.703779876Z caller=ruler.go:609 msg="rule group not owned, ignoring" user=554585 slug=openfleet group=Heartbeat
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330759laio1euw1, cloud_platform=AWS, customer_id=C452, env_id=330759, env_name=C452_Sainsburys_Preview, env_type=sandbox, instance=env-330759laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=preprod" t=2024-05-29T13:44:14.703735939Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=620449 slug=pocketbitcoin version=11 fingerprint=c6c4a726d91b0063 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.703580349Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=lnpayout_channels_payout_provider_outbound, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=lnpayout_channels_payout_provider_outbound, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging Value:0xc003202740} B:{Var:B Labels:__name__=lnpayout_channels_payout_provider_outbound, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging Value:0xc0032027b8} C:{Var:C Labels:__name__=lnpayout_channels_payout_provider_outbound, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging Value:0xc003202838}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.703195852s EvaluationString:[ var='A' labels={__name__=lnpayout_channels_payout_provider_outbound, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging} value=5.017969e+06 ], [ var='B' labels={__name__=lnpayout_channels_payout_provider_outbound, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging} value=5.017969e+06 ], [ var='C' labels={__name__=lnpayout_channels_payout_provider_outbound, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging} value=0 ]}]" duration=8.030283ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330672laio1aps1, cloud_platform=AWS, customer_id=C782, env_id=330672, env_name=C782_Smart_Axiata_PROD, env_type=prod, instance=env-330672laio1aps1, job=integrations/node_exporter, region=ap-southeast-1, stage=live" t=2024-05-29T13:44:14.703558272Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330672laio1aps1, cloud_platform=AWS, customer_id=C782, env_id=330672, env_name=C782_Smart_Axiata_PROD, env_type=prod, instance=env-330672laio1aps1, job=integrations/node_exporter, region=ap-southeast-1, stage=live" t=2024-05-29T13:44:14.703538634Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.703560519Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=190917 slug=d1cx t=2024-05-29T13:44:14.703254919Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.70321351Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.70317048Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330555laio1eastus, cloud_platform=Azure, customer_id=A222, env_id=330555, env_name=A222 Watsco Prod, env_type=prod, instance=env-330555laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.703117626Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.703145802Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.703100163Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.703099958Z caller=remote_instance_store.go:51 user=859590 slug=dfdssandbox msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330555laio1eastus, cloud_platform=Azure, customer_id=A222, env_id=330555, env_name=A222 Watsco Prod, env_type=prod, instance=env-330555laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.703100568Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=c589e4b3f7866207 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.703009549Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.702750823s EvaluationString:}]" duration=172.950761ms
+ logger=ngalert.state.manager user=859590 slug=dfdssandbox instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.703040626Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330540laio1eastus, cloud_platform=Azure, customer_id=A222, env_id=330540, env_name=A222 Watsco Dev, env_type=dev, instance=env-330540laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.702865876Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.702754761Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330431laioeastus, cloud_platform=Azure, customer_id=A211, env_id=330431, env_name=a211_proact_dep_prod, env_type=prod, instance=env-330431laioeastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.702720664Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.702439948Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330303laio1cac1, cloud_platform=AWS, customer_id=C589, env_id=330303, env_name=C589_COX_Canada_DEV, env_type=dev, instance=env-330303laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:14.702593126Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=652086 slug=unihosted t=2024-05-29T13:44:14.702477948Z level=debug msg="Saving alert states done" count=42 max_state_save_concurrency=1 duration=663.615373ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330301laio1cac1, cloud_platform=AWS, customer_id=C589, env_id=330301, env_name=C589_COX_Canada_PROD, env_type=prod, instance=env-330301laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:14.702411246Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.702351521Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.702223402Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.702214726Z caller=remote_instance_store.go:51 user=813270 slug=adiante msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=838012 slug=lepton t=2024-05-29T13:44:14.702118477Z level=debug msg="Saving alert states" count=25908 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.702146582Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330296laio2use1, cloud_platform=AWS, customer_id=C487, env_id=330296, env_name=C487 Pfizer PROD, env_type=prod, instance=env-330296laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.701980903Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330296laio2use1, cloud_platform=AWS, customer_id=C487, env_id=330296, env_name=C487 Pfizer PROD, env_type=prod, instance=env-330296laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.701963732Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330296laio1use1, cloud_platform=AWS, customer_id=C487, env_id=330296, env_name=C487 Pfizer PROD, env_type=prod, instance=env-330296laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.701803357Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330296laio1use1, cloud_platform=AWS, customer_id=C487, env_id=330296, env_name=C487 Pfizer PROD, env_type=prod, instance=env-330296laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.701787953Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330278laio1apn1, cloud_platform=AWS, customer_id=C630, env_id=330278, env_name=C630_Parallel_Prod, env_type=prod, instance=env-330278laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=testing" t=2024-05-29T13:44:14.701633324Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=47111 slug=swiftnavigation instance= t=2024-05-29T13:44:14.701306974Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=47111 slug=swiftnavigation t=2024-05-29T13:44:14.701243428Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.701361331Z caller=remote_instance_store.go:51 user=47111 slug=swiftnavigation msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-330242laio1use1, cloud_platform=AWS, customer_id=C781, env_id=330242, env_name=C781_CON_AIBI, env_type=prod, instance=env-330242laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.701245945Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.701036997Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.700809657Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.700757226Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.752806ms
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.700510426Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.700182868Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance= t=2024-05-29T13:44:14.700135433Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=206107 slug=hydrolix version=2 fingerprint=3fa3bd3bf386f1c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.700014038Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0130dd7f8} B:{Var:B Labels: Value:0xc0130dd800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.699690362s EvaluationString:[ var='A' labels={} value=2 ], [ var='B' labels={} value=0 ]}]" duration=53.124831ms
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=sAAhZ0a7z, ref_id=A" t=2024-05-29T13:44:14.700107913Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.699965669Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:14.699802615Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=554710 slug=timeneye
+ level=debug ts=2024-05-29T13:44:14.699771315Z caller=ruler.go:522 msg="tenant is owned by this instance" user=554710 slug=timeneye groups=0
+ logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:14.699718253Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.039732ms
+ level=debug ts=2024-05-29T13:44:14.696189676Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.693047535Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:14.69333266Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:14.69332215Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329775laio1use1, cloud_platform=AWS, customer_id=C707, env_id=329775, env_name=C707 TacoBell DEV, env_type=dev, instance=env-329775laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.699207402Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329755laio1usw1, cloud_platform=AWS, customer_id=C652, env_id=329755, env_name=C652 Gilead DEV, env_type=dev, instance=env-329755laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.698967134Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.698737765Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.695505012Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.697575954Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.697369334Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.697243427Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=87.725729ms
+ level=debug ts=2024-05-29T13:44:14.697209811Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.697129508Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329466laio1use1, cloud_platform=AWS, customer_id=C737, env_id=329466, env_name=C737 Road Scholar Prod, env_type=prod, instance=env-329466laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.697077215Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:14.697042656Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=491157 slug=prd01wr instance="DatabaseClass=db.r5.xlarge" t=2024-05-29T13:44:14.696991348Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.696924738Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.696942613Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.696876255Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=02723331e2e1e063 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.696790924Z level=debug msg="Alert rule evaluated" results="[{Instance:DatabaseClass=db.r5.xlarge State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:DatabaseClass=db.r5.xlarge Value:} C:{Var:C Labels:DatabaseClass=db.r5.xlarge Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.696432566s EvaluationString:[ var='B' labels={DatabaseClass=db.r5.xlarge} value=null ], [ var='C' labels={DatabaseClass=db.r5.xlarge} value=null ]}]" duration=114.546825ms
+ logger=ngalert.scheduler user=245291 slug=pismo version=2 fingerprint=935b48a224ab0153 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.696768978Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.696541293s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=187.728664ms
+ level=debug ts=2024-05-29T13:44:14.696712854Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.696735002Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.696686368Z caller=grafana.go:247 user=810903 slug=vespaai msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=7 alerts=0
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329398laio2usw2, cloud_platform=AWS, customer_id=C772, env_id=329398, env_name=C772 Boyd PROD, env_type=prod, instance=env-329398laio2usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.696710109Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.695327894Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.696571917Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=340750 slug=aptoslabs instance= t=2024-05-29T13:44:14.696516418Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.696426864Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.696398908Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.695180727Z caller=remote_instance_store.go:51 user=532654 slug=chathamdirectint msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.696286184Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.696207907Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329395laio1usw2, cloud_platform=AWS, customer_id=C774, env_id=329395, env_name=c774_Cheesecake_dev1, env_type=dev, instance=env-329395laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.69617632Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329395laio1usw2, cloud_platform=AWS, customer_id=C774, env_id=329395, env_name=c774_Cheesecake_dev1, env_type=dev, instance=env-329395laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.696160266Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329393laio1usw2, cloud_platform=AWS, customer_id=C774, env_id=329393, env_name=c774_Cheesecake_Prod, env_type=prod, instance=env-329393laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.69595864Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.695880604Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.695525392Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329365laio2eastus2, cloud_platform=Azure, customer_id=A234, env_id=329365, env_name=A234 BF Partner Prod, env_type=prod, instance=env-329365laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.695739242Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.695703559Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.695666235Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.695491409Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.695482535Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzv4ifki-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.695478189Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329364laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329364, env_name=A234 BF Internal Prod, env_type=prod, instance=env-329364laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.69544497Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=403369 slug=clearsaletechlabs t=2024-05-29T13:44:14.695412751Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.695405291Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzv4ifki-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.695402758Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=523054 slug=vialtopartners version=227 fingerprint=0b0bb676aaaf43a7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.695221538Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.694455549s EvaluationString:}]" duration=42.101365ms
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=ubi-features-aggregator-obd, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=ubi-features-aggregator-obd, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development" t=2024-05-29T13:44:14.695215765Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=27998 slug=korob instance="datasource_uid=grafanacloud-korob, ref_id=A" t=2024-05-29T13:44:14.695210051Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzv1yyjb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.695246817Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzv1yyjb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.695210716Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329363laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329363, env_name=A234 BF Partner Pre-Prod, env_type=qa, instance=env-329363laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.695247059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=532654 slug=chathamdirectint instance= t=2024-05-29T13:44:14.691123866Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=344017 slug=descript t=2024-05-29T13:44:14.695033203Z level=debug msg="Skip rule evaluation because it is paused"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329362laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329362, env_name=A234 BF Internal Pre-Prod, env_type=qa, instance=env-329362laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.695093213Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzv1yyjb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.695069605Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329362laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329362, env_name=A234 BF Internal Pre-Prod, env_type=qa, instance=env-329362laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.695080032Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzp90vgc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.695000954Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzp90vgc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694900943Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzp90vgc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694867943Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329361laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329361, env_name=A234 BF Internal Test, env_type=dev, instance=env-329361laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.694907166Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.694867584Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzcbmqf4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694830582Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzcbmqf4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694729841Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.694781162Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329336laio1use1, cloud_platform=AWS, customer_id=C673, env_id=329336, env_name=C673 OBHG Prod, env_type=prod, instance=env-329336laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.694730113Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zzcbmqf4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.69463005Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.694662124Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zz8uezco-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694475389Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zz8uezco-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694426598Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zz8uezco-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694409068Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329331laioaps1, cloud_platform=AWS, customer_id=C747, env_id=329331, env_name=C747_PILPTE_PROD, env_type=prod, instance=env-329331laioaps1, job=integrations/node_exporter, region=ap-southeast-1, stage=live" t=2024-05-29T13:44:14.694328131Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=4e2a23bbc23803e5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.694268374Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.693979156s EvaluationString:}]" duration=18.459051ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyv8kw1n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694171576Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyv8kw1n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694135795Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329330laio1use1, cloud_platform=AWS, customer_id=C673, env_id=329330, env_name=C673 OBHG Dev, env_type=dev, instance=env-329330laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.69412754Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyv8kw1n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694070765Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyqsuodi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.694020864Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyqsuodi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693906543Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyqsuodi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693833472Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zynubqst-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693795762Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zynubqst-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693726381Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.693681985Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329321laio1use1, cloud_platform=AWS, customer_id=C050, env_id=329321, env_name=C050_UAT_U12, env_type=qa, instance=env-329321laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.693759526Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zykpqyhj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693459308Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zykpqyhj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693427978Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329319laiouse1, cloud_platform=AWS, customer_id=C736, env_id=329319, env_name=C736_EDU_Denver_Parallel, env_type=prod, instance=env-329319laiouse1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.693439075Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyfezbew-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693392838Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyfezbew-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693321297Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:14.693349015Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyfezbew-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693286346Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329298laio1apn1, cloud_platform=AWS, customer_id=C767, env_id=329298, env_name=C767 MEGMILK Dev, env_type=dev, instance=env-329298laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=testing" t=2024-05-29T13:44:14.693292044Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyas2hq5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693186655Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:14.693221273Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.693120315Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=206107 slug=hydrolix version=8 fingerprint=e7cd8f6d80447f72 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.69308237Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=prometheus_ready, instance=localhost:9090, job=prometheus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=prometheus_ready, instance=localhost:9090, job=prometheus Value:0xc016502ce0} B:{Var:B Labels:__name__=prometheus_ready, instance=localhost:9090, job=prometheus Value:0xc016502d30} C:{Var:C Labels:__name__=prometheus_ready, instance=localhost:9090, job=prometheus Value:0xc016502d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.692716551s EvaluationString:[ var='A' labels={__name__=prometheus_ready, instance=localhost:9090, job=prometheus} value=1 ], [ var='B' labels={__name__=prometheus_ready, instance=localhost:9090, job=prometheus} value=1 ], [ var='C' labels={__name__=prometheus_ready, instance=localhost:9090, job=prometheus} value=0 ]}]" duration=156.848782ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zyas2hq5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693113075Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxy8a403-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.693005944Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329276laio2eastus2, cloud_platform=Azure, customer_id=A226, env_id=329276, env_name=A226 Big Lots Prod, env_type=prod, instance=env-329276laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.693041404Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329276laio1eastus2, cloud_platform=Azure, customer_id=A226, env_id=329276, env_name=A226 Big Lots Prod, env_type=prod, instance=env-329276laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.692856004Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.692696851Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxrod6c1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.6926587Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329252laio1eastus2, cloud_platform=Azure, customer_id=A226, env_id=329252, env_name=A226_Biglots_Dev, env_type=dev, instance=env-329252laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.692688027Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxqiito0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.69262664Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxqiito0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.692600929Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.692497694Z caller=remote_instance_store.go:51 user=405413 slug=fltmazzone msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=405413 slug=fltmazzone t=2024-05-29T13:44:14.692440997Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=405413 slug=fltmazzone t=2024-05-29T13:44:14.692373496Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=405413 slug=fltmazzone version=2 fingerprint=ad95a63db175f6ab attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.692239584Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc045155420} C:{Var:C Labels: Value:0xc045155428} D:{Var:D Labels: Value:0xc045155418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.691772122s EvaluationString:[ var='B' labels={} value=0 ], [ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=569.190693ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxl7qc86-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.692294706Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.692197478Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329246laio1euw1, cloud_platform=AWS, customer_id=C473, env_id=329246, env_name=C473_Avon_Dev_2021U11, env_type=dev, instance=env-329246laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.692284876Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxl7qc86-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.692203255Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxju872w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.692113024Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxju872w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.692079104Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=231061 slug=teamaround t=2024-05-29T13:44:14.692039311Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=231061 slug=teamaround instance="CacheClusterId=redis-broker-prod-euwe3-002" t=2024-05-29T13:44:14.692025281Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxju872w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.692011403Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.691913872Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.691979135Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.691799362Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxjngikh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691810881Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.691776545Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.691723164Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.69175119Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxhjavuz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691636549Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxhjavuz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691612899Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329056laioeuw1, cloud_platform=AWS, customer_id=C765, env_id=329056, env_name=c765_vp_eu, env_type=prod, instance=env-329056laioeuw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.691582297Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxhjavuz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691579629Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.691515873Z caller=remote_instance_store.go:51 user=186562 slug=defier msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.691419917Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=186562 slug=defier instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.691441036Z level=warn msg="Failed to take an image" dashboard=2h0Uit_Mz panel=89 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxd7zufl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691451307Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxcus1fn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691330936Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxcus1fn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691261345Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.691124151Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.691158135Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxbbtmi8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691080664Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329052laio1eastus, cloud_platform=Azure, customer_id=A231, env_id=329052, env_name=A231 Ann Taylor DEV, env_type=dev, instance=env-329052laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.691091521Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.691079795Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxbbtmi8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.691049193Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=148654 slug=tinybeans instance= t=2024-05-29T13:44:14.690871787Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxbbtmi8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.690935012Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.690811517Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=148654 slug=tinybeans instance= t=2024-05-29T13:44:14.690858885Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.690894694Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.690920017Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.690808705Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=148654 slug=tinybeans version=1 fingerprint=df9333938cbba870 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.690723402Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.690377703s EvaluationString:}]" duration=103.536466ms
+ level=debug ts=2024-05-29T13:44:14.690730562Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.690869325Z caller=remote_instance_store.go:51 user=664976 slug=staging1themomproject msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxas52b1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n,
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.69071384Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329047laio1sae1, cloud_platform=AWS, customer_id=C764, env_id=329047, env_name=C764 Prodemge Dev, env_type=dev, instance=env-329047laio1sae1, job=integrations/node_exporter, region=sa-east-1, stage=live" t=2024-05-29T13:44:14.690781843Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zxas52b1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.690648249Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.690642834Z caller=remote_image_capturer.go:54 user=186562 slug=defier rule_org_id=1 rule_uid=l4VXModnz dashboard=2h0Uit_Mz panel=89 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=186562 slug=defier instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.690543314Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-329012laio1eastus, cloud_platform=Azure, customer_id=A232, env_id=329012, env_name=A232_Lane_Bryant_DEV, env_type=dev, instance=env-329012laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.690549572Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx3wf5t3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.690531868Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=186562 slug=defier instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.690529897Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:14.690428361Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.977284ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx3wf5t3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.690492438Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=186562 slug=defier instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.690502952Z level=warn msg="Failed to take an image" 
dashboard=2h0Uit_Mz panel=89 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx3wf5t3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.690459257Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.689954486Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328970laio1sae1, cloud_platform=AWS, customer_id=C764, env_id=328970, env_name=C764 Prodemge Prod, env_type=prod, instance=env-328970laio1sae1, job=integrations/node_exporter, region=sa-east-1, stage=live" t=2024-05-29T13:44:14.690034918Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.689947078Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx19chbj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689943012Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx19chbj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689914292Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx1388e2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689873891Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx1388e2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689842111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328950laio1use1, cloud_platform=AWS, customer_id=C750, 
env_id=328950, env_name=c750_Centene_Test, env_type=test, instance=env-328950laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.68985384Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx1388e2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.68977027Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zx1388e2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689706569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwz40p4p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689664559Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.689630939Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwz40p4p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689597588Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=186562 slug=defier instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.689599742Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=186562 slug=defier instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.689588826Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwz40p4p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689568108Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, 
agent_hostname=env-328872laio1use1, cloud_platform=AWS, customer_id=C750, env_id=328872, env_name=c750_Centene_Prod, env_type=prod, instance=env-328872laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.689449088Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328872laio1use1, cloud_platform=AWS, customer_id=C750, env_id=328872, env_name=c750_Centene_Prod, env_type=prod, instance=env-328872laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.689429967Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.689406095Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=438185 slug=nodeinfra t=2024-05-29T13:44:14.689429584Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.845946ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwskvn91-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689398816Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.689310789Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwskvn91-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689333216Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328855laio1use1, cloud_platform=AWS, customer_id=C635, env_id=328855, env_name=C635 TOTVS PROD, env_type=prod, instance=env-328855laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.689269955Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.689267322Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwokf0ws-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689152194Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.689143647Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwokf0ws-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689116503Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.68910738Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwmz7zt1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.689054333Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:14.689076506Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zwmz7zt1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688951362Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.688956816Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.689023085Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-prom, ref_id=Query" t=2024-05-29T13:44:14.688965754Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zw8s5c8u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688863641Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328747laio1usw2, cloud_platform=AWS, customer_id=C556, env_id=328747, env_name=C556_G3_Dev_U11, env_type=dev, instance=env-328747laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:14.688941212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:14.688681247Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-zw8s5c8u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688732839Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.688514288Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328736laio1euc1, cloud_platform=AWS, customer_id=C658, env_id=328736, env_name=C658_Dev_Parallel, env_type=dev, instance=env-328736laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.688556143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvymxoai-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688513577Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="map_key=unregistered" t=2024-05-29T13:44:14.688450666Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvvhxm4v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688308695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="map_key=unregistered" t=2024-05-29T13:44:14.68821573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvvhxm4v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688257365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvvhxm4v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688248285Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvvhxm4v-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.688219444Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.68810813Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328528laio1use1, cloud_platform=AWS, customer_id=C648, env_id=328528, env_name=C648 Prisma DEV, env_type=dev, instance=env-328528laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.68797913Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.687929103Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvgc6z3b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.68782978Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328399laio1use1, cloud_platform=AWS, customer_id=C648, env_id=328399, env_name=C648 Prisma PROD, env_type=prod, instance=env-328399laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.68774824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvg8m9oe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687740689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvg8m9oe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687707029Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.687658999Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206439 slug=relaypro instance="map_key=unknown" t=2024-05-29T13:44:14.687731039Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.687658921Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206439 slug=relaypro instance="map_key=service_unavailable" t=2024-05-29T13:44:14.687697322Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvezv0ml-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687606238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvezv0ml-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687596898Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvezv0ml-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687566348Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="map_key=invalid_argument" t=2024-05-29T13:44:14.687661583Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.687600237Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206439 slug=relaypro instance="map_key=fcm_server_error" t=2024-05-29T13:44:14.687630345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206439 slug=relaypro instance="map_key=fcm_error" t=2024-05-29T13:44:14.687608257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328393laio1eastus, cloud_platform=Azure, customer_id=A229, env_id=328393, env_name=A229 Sompo QA, env_type=qa, instance=env-328393laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.687559266Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328393laio1eastus, cloud_platform=Azure, customer_id=A229, env_id=328393, env_name=A229 Sompo QA, env_type=qa, instance=env-328393laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.687548499Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:14.687525838Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.190637ms + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.687510865Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.657507ms + level=info ts=2024-05-29T13:44:14.687458123Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 
addr=10.144.208.13:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c61c89ca-9a69-4050-bb9a-b701d93dd3f1 alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvdw4a0s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687430206Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvdw4a0s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687406966Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.68733277Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.246553ms + level=debug ts=2024-05-29T13:44:14.687352219Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvdk1m54-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687327235Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.687278653Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328323laio1eastus, cloud_platform=Azure, customer_id=A229, env_id=328323, env_name=A229 Sompo DEV, env_type=dev, instance=env-328323laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.687322839Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.687330732Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=679029 slug=joveoprodaws version=30465 fingerprint=bd3265fed6f439e1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.687244731Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.686869726s EvaluationString:}]" duration=59.966513ms + level=debug ts=2024-05-29T13:44:14.687275015Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.687219302Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.687208554Z caller=remote_instance_store.go:51 user=35611 slug=play 
msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.687264789Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zvdk1m54-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.687216684Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.686891818Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zuv2anyi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.68682647Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:14.686698084Z level=debug msg="Saving alert states done" count=12 max_state_save_concurrency=1 duration=169.786818ms + level=debug ts=2024-05-29T13:44:14.686582027Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.686591999Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:14.686628963Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zus1qsvd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.686580487Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=150145 slug=pleasant version=1 fingerprint=59eaa2935cd81b7e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.686565381Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.686258164s EvaluationString:}]" duration=24.722312ms + level=debug ts=2024-05-29T13:44:14.686553271Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=8 fingerprint=f6092cfa2d1db7a2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.686365294Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.686177137s EvaluationString:}]" duration=204.603204ms + level=debug ts=2024-05-29T13:44:14.686478258Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager.persist user=901230 slug=integromonitor t=2024-05-29T13:44:14.68642393Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.629648ms + level=debug ts=2024-05-29T13:44:14.686380696Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zumbahc9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.686414056Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zumbahc9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.686379815Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.68631076Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zumbahc9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.686270554Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zujtosqf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.686079762Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zujtosqf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.685962301Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-328241laio1euw1, cloud_platform=AWS, customer_id=C434, env_id=328241, env_name=C434_AVORIS_DEV, env_type=dev, instance=env-328241laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.68603002Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zujtosqf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.68588426Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.685686148Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.685494624Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.685391058Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zuegymqz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.685377875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="datasource_uid=KArZ6Sf4z, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.685326154Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="datasource_uid=KArZ6Sf4z, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.685320027Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327817laio1use1, cloud_platform=AWS, customer_id=C651, env_id=327817, env_name=C651_Dorinka_Prod, env_type=prod, instance=env-327817laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.685269786Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.685208003Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327817laio1use1, cloud_platform=AWS, customer_id=C651, env_id=327817, env_name=C651_Dorinka_Prod, env_type=prod, instance=env-327817laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.685248986Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zuegymqz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.685105012Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.685166332Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zu8ov8rz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.6848564Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zu57szv5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.684684188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.684743987Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:14.684659533Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.933107ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zu57szv5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.684654838Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.684723974Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.684597111Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=814d40e3fa85d151 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.684622151Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.684476368s EvaluationString:}]" duration=175.161146ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zu17811b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.684516406Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327789laio1use1, cloud_platform=AWS, customer_id=C557, env_id=327789, env_name=C557 AF PROD, env_type=prod, instance=env-327789laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.684501115Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ztovv7o5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.684324554Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.684330544Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.684272589Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ztovv7o5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.684194253Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.684057639Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ztn5b997-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.684066462Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327730laio1euc1, cloud_platform=AWS, customer_id=C665, env_id=327730, env_name=C665_DKB_PROD_U11, env_type=prod, instance=env-327730laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.684073038Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327730laio1euc1, cloud_platform=AWS, customer_id=C665, env_id=327730, env_name=C665_DKB_PROD_U11, env_type=prod, instance=env-327730laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.684059475Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.683968549Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.68395828Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zthc4dq2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683804259Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327717laio1euc1, cloud_platform=AWS, customer_id=C665, env_id=327717, env_name=C665_DKB_DEV_U11, env_type=dev, instance=env-327717laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.683705295Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327717laio1euc1, cloud_platform=AWS, customer_id=C665, env_id=327717, env_name=C665_DKB_DEV_U11, env_type=dev, instance=env-327717laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:14.683688728Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ztcr0sri-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683585687Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ztcr0sri-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683539896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt7do5m8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683503206Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt7do5m8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683422315Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327668laio1eastus2, cloud_platform=Azure, customer_id=A223, env_id=327668, env_name=A223 Ross Dev, env_type=dev, instance=env-327668laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.683298077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327668laio1eastus2, cloud_platform=Azure, customer_id=A223, env_id=327668, env_name=A223 Ross Dev, env_type=dev, 
instance=env-327668laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.683282312Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt1hj2ib-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683186303Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt1hj2ib-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683117502Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt058qy6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683061311Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt058qy6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.683005441Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt058qy6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.6829785Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt02r327-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682860369Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327524laio1use1, cloud_platform=AWS, customer_id=C545, env_id=327524, env_name=C545 City Austin PROD, env_type=prod, instance=env-327524laio1use1, job=integrations/node_exporter, region=us-east-1, 
stage=live" t=2024-05-29T13:44:14.682863722Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.682823337Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.682726996Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zt02r327-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682651287Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.682744877Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=308298 slug=xbto t=2024-05-29T13:44:14.682659417Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.49467ms + level=debug ts=2024-05-29T13:44:14.682547013Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=708873 slug=soultv instance="DBInstanceIdentifier=bd-prod-tiva" t=2024-05-29T13:44:14.682595686Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsy9d80q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682610127Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsy9d80q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682538986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsy9d80q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682445125Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsx5zy11-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.682405425Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsx5zy11-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682338174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsx5zy11-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682323324Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327440laio1usw2, cloud_platform=AWS, customer_id=C666, env_id=327440, env_name=C666 Green Dot PROD, env_type=prod, instance=env-327440laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:14.682247491Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327440laio1usw2, cloud_platform=AWS, customer_id=C666, env_id=327440, env_name=C666 Green Dot PROD, env_type=prod, instance=env-327440laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:14.682231697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsvoa5bs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.682200893Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:14.682075885Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.382219ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zssp5cul-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.68193567Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zssp5cul-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.68190844Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327423laio1euw3, cloud_platform=AWS, customer_id=C676, env_id=327423, env_name=C676_Guess_EU_Prod_U11, env_type=prod, instance=env-327423laio1euw3, job=integrations/node_exporter, region=eu-west-3, stage=live" t=2024-05-29T13:44:14.681787176Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zssbzv9h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681769108Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zssbzv9h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681727078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zssbzv9h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681657507Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zshiu4s7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681591426Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zshiu4s7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681561786Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.673748988Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:14.681566613Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:14.669316181Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.669201139Z 
caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zshiu4s7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681487035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673665627Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673658965Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673503522Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673462423Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673444591Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673417262Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.67340056Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.67335477Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673336559Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673330609Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673305369Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673287598Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zshhshdm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681311583Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673252638Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673203607Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673193466Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673102015Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:14.681123515Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.673084004Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.67287687Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.67287045Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.67283684Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsgf2dpj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.681135192Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.67282892Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.672811419Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.672805359Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.672798279Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.672733438Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.672727578Z 
level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:14.681031205Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.672601675Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager.persist user=313711 slug=julienbeduneau t=2024-05-29T13:44:14.680890835Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.672504253Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.680899203Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=313711 slug=julienbeduneau t=2024-05-29T13:44:14.68081453Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="onprem=DECA" +logger=ngalert.state.manager user=313711 slug=julienbeduneau t=2024-05-29T13:44:14.680786247Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zsekb9w1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.680855829Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=313711 slug=julienbeduneau version=10 fingerprint=c4ddf1e3b1b6458c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.680663963Z level=debug msg="Alert rule evaluated" results="[{Instance:onprem=DECA State:Normal Error: Results:map[] Values:map[NB_LOGS_30_MIN:{Var:NB_LOGS_30_MIN Labels:onprem=DECA Value:0xc04d58af58} NB_LOGS_BELOW_1:{Var:NB_LOGS_BELOW_1 Labels:onprem=DECA Value:0xc04d58afa0} NB_LOGS_LAST_30M:{Var:NB_LOGS_LAST_30M Labels:onprem=DECA Value:0xc04d58afe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.680201133s EvaluationString:[ var='NB_LOGS_30_MIN' labels={onprem=DECA} value=3 ], [ var='NB_LOGS_BELOW_1' labels={onprem=DECA} value=0 ], [ var='NB_LOGS_LAST_30M' labels={onprem=DECA} value=3 ]}]" duration=45.925469ms +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327161laio1euc1, cloud_platform=AWS, customer_id=C560, env_id=327161, env_name=C560 C&A 04 PROD, env_type=prod, instance=env-327161laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.680707936Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=916144 slug=cmjjilpd t=2024-05-29T13:44:14.680453639Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.934049ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-zse0cixt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.680586256Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327044laio1usw1, cloud_platform=AWS, customer_id=C536, env_id=327044, env_name=C536 SKX DEV, env_type=dev, instance=env-327044laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.680446654Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.680384249Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zs7heflx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.680265193Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.68015583Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-327037laio1aps2, cloud_platform=AWS, customer_id=C566, env_id=327037, env_name=C566_KFC_Parallel_Prod, env_type=prod, instance=env-327037laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:14.680200728Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zs7heflx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.680124991Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zs7heflx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.680097451Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.680095787Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zs16ere8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.68005371Z level=debug msg="Keeping 
state" state=Normal +logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="datasource_uid=i12oD1b7k, ref_id=A" t=2024-05-29T13:44:14.680015376Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.scheduler user=306551 slug=teckresourcesalerts version=11 fingerprint=28c0db956e51c594 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.679896176Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=i12oD1b7k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.679556953s EvaluationString:}]" duration=109.27515ms +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=worker-7f6fc5d8fb-dpd44" t=2024-05-29T13:44:14.67994411Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zs16ere8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.679907849Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zs16ere8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.679876559Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-statuspage-796ccfb8ff-7q9mm" t=2024-05-29T13:44:14.679812407Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-alerts-76d4685f4c-rtcdz" t=2024-05-29T13:44:14.679762086Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-alerts-76d4685f4c-fjqn5" t=2024-05-29T13:44:14.679730215Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-alerts-76d4685f4c-8frsl" t=2024-05-29T13:44:14.679714145Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-6b7c647b9c-v45ck" t=2024-05-29T13:44:14.679702434Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-6b7c647b9c-qzctm" t=2024-05-29T13:44:14.679681984Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, 
pod=web-6b7c647b9c-qzctm" t=2024-05-29T13:44:14.679677654Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-6b7c647b9c-pg7kz" t=2024-05-29T13:44:14.679666333Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=web-6b7c647b9c-pg7kz" t=2024-05-29T13:44:14.679661973Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zryvrm2g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.679766988Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=stackdriver-exporter-7bd6bc7795-zmjhf" t=2024-05-29T13:44:14.679639093Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=700783 slug=gsgmedia version=13 fingerprint=70b263a67637075c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.67232986Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.671767814s EvaluationString:}]" duration=33.858402ms +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=cron-fffb686d5-8hw9s" t=2024-05-29T13:44:14.679623182Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.679679723Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=branch-previews-5d98cfc987-jltps" t=2024-05-29T13:44:14.679603192Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-staging/europe-west1/main, namespace=core, pod=branch-previews-5d98cfc987-7jxfn" t=2024-05-29T13:44:14.679560591Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=worker-oncall-77f7d5c89f-pzzdf" t=2024-05-29T13:44:14.67953752Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=worker-oncall-77f7d5c89f-pzzdf" t=2024-05-29T13:44:14.679498499Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrwwivz3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.679555935Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=worker-oncall-77f7d5c89f-5l2vk" t=2024-05-29T13:44:14.679453688Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrwwivz3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.679462814Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.679422031Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=worker-6746794dcb-wclqd" t=2024-05-29T13:44:14.679407397Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=worker-6746794dcb-6mmrr" t=2024-05-29T13:44:14.679368226Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.679321155Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-statuspage-756d794dc9-wcrz2" t=2024-05-29T13:44:14.679323365Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-statuspage-756d794dc9-rfftw" t=2024-05-29T13:44:14.679300715Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-statuspage-756d794dc9-rfftw" t=2024-05-29T13:44:14.679284394Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.679231463Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-alerts-5f776dff67-qjqd6" t=2024-05-29T13:44:14.679248543Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrprww81-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.679216022Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-zrprww81-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.679117631Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-alerts-5f776dff67-kwxmj" t=2024-05-29T13:44:14.679072859Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrprww81-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67905074Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-alerts-5f776dff67-gmd69" t=2024-05-29T13:44:14.679030748Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-alerts-5f776dff67-fwcbp" t=2024-05-29T13:44:14.679019628Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-alerts-5f776dff67-8jdqb" t=2024-05-29T13:44:14.678993977Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-599f98f8cf-sw4hc" t=2024-05-29T13:44:14.678962377Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrpd694e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67900933Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrpd694e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678939339Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-599f98f8cf-6krg2" t=2024-05-29T13:44:14.678827883Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=web-599f98f8cf-69vvl" t=2024-05-29T13:44:14.678795633Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrpd694e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678865738Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=824492 slug=pineapples instance="cluster=incident-io-production/europe-west1/main, namespace=core, pod=stackdriver-exporter-6c5845c7bc-l26dc" t=2024-05-29T13:44:14.678758122Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrl9rstz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678725497Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326717laio1eastus2, cloud_platform=Azure, customer_id=A218, env_id=326717, env_name=A218 GFS POC, env_type=prod, instance=env-326717laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.678803166Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrl9rstz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678671646Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrjq768h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678559165Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326680laio2use1, cloud_platform=AWS, customer_id=C570, env_id=326680, env_name=C570 Grocery Outlet PROD, env_type=dev, instance=env-326680laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.678507091Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrjq768h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.678452854Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zri2knkn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678247192Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326680laio1use1, cloud_platform=AWS, customer_id=C570, env_id=326680, env_name=C570 Grocery Outlet PROD, env_type=dev, instance=env-326680laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.678255464Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.678212563Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:14.678162409Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=698119 slug=simonsprod t=2024-05-29T13:44:14.678132031Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zreod1tw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678171021Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zreod1tw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.678124531Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zreau198-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.677959749Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326677laio1usw1, cloud_platform=AWS, customer_id=C418, env_id=326677, env_name=C418 R&F DEV, env_type=dev, instance=env-326677laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.677968564Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.677833965Z caller=remote_instance_store.go:51 user=159532 
slug=getfabric msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326672laio1aps2, cloud_platform=AWS, customer_id=C566, env_id=326672, env_name=c566_kfc_dev u11 parallel, env_type=dev, instance=env-326672laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:14.677775493Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.677572375Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.677647126Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrd30mxd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.677660786Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.677543173Z caller=remote_instance_store.go:51 user=162543 slug=rapharacing msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zrb2cup9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.677492764Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=162543 slug=rapharacing instance= t=2024-05-29T13:44:14.677455429Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326662laio1usw1, cloud_platform=AWS, customer_id=C418, env_id=326662, env_name=C418 R&F PROD, env_type=prod, instance=env-326662laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.677338689Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zr96yhz8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.677259702Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.677023368Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:14.67700576Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-zr8vh34o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676999409Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.676956981Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zr8vh34o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676928298Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.67688635Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.676942651Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.67693335Z caller=remote_instance_store.go:51 user=116659 slug=arcdotdev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=116659 slug=arcdotdev instance= t=2024-05-29T13:44:14.676881982Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqychojy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676873428Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.676776724Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqychojy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676858138Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqychojy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676796437Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqfeyawv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676710416Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.676641521Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326586laio1use1, cloud_platform=AWS, customer_id=C642, env_id=326586, env_name=C642 Unison PROD, env_type=prod, instance=env-326586laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.676524404Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.676327566Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqfeyawv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676435833Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqerltz7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676347122Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.676325614Z caller=remote_instance_store.go:51 user=840286 slug=y0h0h0 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqerltz7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676258342Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=840286 slug=y0h0h0 t=2024-05-29T13:44:14.676222925Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=840286 slug=y0h0h0 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.676160177Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqedk50r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.676159251Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=840286 slug=y0h0h0 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.676129937Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqedk50r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67608804Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.67601429Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqco2ike-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675919238Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.67588896Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.675789927Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=PSUGAME PS5 Query" t=2024-05-29T13:44:14.675838728Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zqco2ike-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675832617Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326580laio1use1, cloud_platform=AWS, customer_id=C290, env_id=326580, env_name=C290 Enova PROD, env_type=prod, instance=env-326580laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.675792162Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zq22r2o8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675760286Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zq22r2o8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675728426Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.675633107Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.735172ms
+level=debug ts=2024-05-29T13:44:14.67559569Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-e886d02f283a449c, persistentvolumeclaim=data-rabbitmq-0" t=2024-05-29T13:44:14.675541913Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-bc33ac42e0604f69, persistentvolumeclaim=main-repo1" t=2024-05-29T13:44:14.675506897Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zq0h6icc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675532484Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-7aa2c8080eaf4782, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:14.675453183Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-76845f322ed44aed, persistentvolumeclaim=main-main-xvhg-pgdata" t=2024-05-29T13:44:14.67540724Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-76246a41d2c84388, persistentvolumeclaim=main-main-795j-pgdata" t=2024-05-29T13:44:14.675365904Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-350e709a0cc04c88, persistentvolumeclaim=data-redpanda-0" t=2024-05-29T13:44:14.675332751Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-350e709a0cc04c88, persistentvolumeclaim=data-redpanda-0" t=2024-05-29T13:44:14.675294101Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zq0h6icc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675436213Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326571laio1use1, cloud_platform=AWS, customer_id=C476, env_id=326571, env_name=C476 DXL DEV, env_type=dev, instance=env-326571laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.675403155Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zptnhw1o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675404643Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-0012751a643f4157, persistentvolumeclaim=data-prometheus-0" t=2024-05-29T13:44:14.675206803Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.675092334Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.675127392Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zprk39x7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67509257Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=751407 slug=nethermindjuno t=2024-05-29T13:44:14.675088452Z level=debug msg="Saving alert states" count=4 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zprk39x7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675036109Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=751407 slug=nethermindjuno t=2024-05-29T13:44:14.675023514Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}No Pods/repicas are running of deployment: {{$labels.deployment}': error parsing template __alert_No pods are running for deployments: template: __alert_No pods are running for deployments:1: bad character U+007D '}'"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zprk39x7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.675006559Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics" t=2024-05-29T13:44:14.674948727Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=751407 slug=nethermindjuno t=2024-05-29T13:44:14.674930927Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}No Pods/repicas are running of deployment: {{$labels.deployment}': error parsing template __alert_No pods are running for deployments: template: __alert_No pods are running for deployments:1: bad character U+007D '}'"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zphniljx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674897238Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics" t=2024-05-29T13:44:14.67486583Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zphniljx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674834777Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.674733342Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326565laio1euw1, cloud_platform=AWS, customer_id=C562, env_id=326565, env_name=C562 TSOL Dev U11, env_type=dev, instance=env-326565laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.674746987Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics" t=2024-05-29T13:44:14.674735911Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=751407 slug=nethermindjuno t=2024-05-29T13:44:14.674705704Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}No Pods/repicas are running of deployment: {{$labels.deployment}': error parsing template __alert_No pods are running for deployments: template: __alert_No pods are running for deployments:1: bad character U+007D '}'"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zpf3as7g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674673385Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zpf3as7g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674646325Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326497laio1usw2, cloud_platform=AWS, customer_id=C666, env_id=326497, env_name=C666 Green Dot DEV, env_type=dev, instance=env-326497laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:14.674581662Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=751407 slug=nethermindjuno version=42 fingerprint=d6df00fa4f286293 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.674433325Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc0083634b8} B:{Var:B Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc008363568} C:{Var:C Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc0083635e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.673753061s EvaluationString:[ var='A' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='B' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='C' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=0 ]} {Instance:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc0083636d0} B:{Var:B Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc008363740} C:{Var:C Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc0083637c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.673772719s EvaluationString:[ var='A' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='B' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='C' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-cainjector, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=0 ]} {Instance:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc0083638b0} B:{Var:B Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc008363930} C:{Var:C Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc0083639a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.673805437s EvaluationString:[ var='A' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='B' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='C' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=0 ]} {Instance:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=kong-kong, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=infrastructure, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=kong-kong, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=infrastructure, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc008363ab0} B:{Var:B Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=kong-kong, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=infrastructure, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc008363b30} C:{Var:C Labels:__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=kong-kong, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=infrastructure, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics Value:0xc008363bc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.673819414s EvaluationString:[ var='A' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=kong-kong, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=infrastructure, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='B' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=kong-kong, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=infrastructure, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=1 ], [ var='C' labels={__name__=kube_deployment_status_replicas_available, container=kube-state-metrics, deployment=kong-kong, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=infrastructure, pod=juno-prod-prometheus-kube-state-metrics-755fbf9c8c-xbq85, service=juno-prod-prometheus-kube-state-metrics} value=0 ]}]" duration=107.512124ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zpa4tv9h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674545534Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zpa4tv9h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674517684Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zpa4tv9h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674455483Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zp9ttwra-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674287301Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zp9ttwra-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674255781Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zp6gwbo7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67413191Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zp6gwbo7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674094409Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.673948857Z caller=remote_instance_store.go:51 user=538040 slug=dwtsandbox msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zp6gwbo7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.674064669Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.673931255Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.673920204Z caller=remote_instance_store.go:51 user=656158 slug=muonspacegroundprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zp3lfa9l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673917947Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.673854421Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.673819551Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zp3lfa9l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673822207Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zoxpea6n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673756376Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zoxpea6n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673733916Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zoxpea6n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673701755Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326271laio1euc1, cloud_platform=AWS, customer_id=C552, env_id=326271, env_name=C552 Klingel DEV, env_type=dev, instance=env-326271laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.673612659Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.673515515Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zov809wz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673471053Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zov809wz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673430992Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zov809wz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673360632Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zoqu94dk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67322849Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.673154539Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.673113115Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zoqu94dk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67316565Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zoqu94dk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673144039Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zola0rk6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.673080229Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=191103 slug=amazonadmin version=145 fingerprint=55284ddfbe497c75 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.672995713Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.672695125s EvaluationString:}]" duration=58.581667ms
+level=debug ts=2024-05-29T13:44:14.672956482Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zola0rk6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672912617Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zola0rk6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672882787Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326240laio1use2, cloud_platform=AWS, customer_id=C691, env_id=326240, env_name=C691 CFM Materials PROD, env_type=prod, instance=env-326240laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.672885439Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zola0rk6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672851066Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zojthvgi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672785666Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zojthvgi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672755765Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zojthvgi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672716605Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=163215 slug=tripadvisor instance= t=2024-05-29T13:44:14.67267156Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.672619032Z caller=remote_instance_store.go:51 user=438185 slug=nodeinfra msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zognrxe2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672593994Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zognrxe2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672572024Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zognrxe2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672541053Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zognrxe2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672511193Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:14.672505417Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=ETH"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zognrxe2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672448752Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zodgnagi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672371531Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zodgnagi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672294021Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zodgnagi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67223603Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zodgnagi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67221118Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326213laio1use1, cloud_platform=AWS, customer_id=C366, env_id=326213, env_name=C366 Amerisource Prod NEW, env_type=prod, instance=env-326213laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.67219266Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zocmb2gq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672152489Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zocmb2gq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672091579Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zocmb2gq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.672051638Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326199laio2eastus2, cloud_platform=Azure, customer_id=A223, env_id=326199, env_name=A223 Ross Prod, env_type=prod, instance=env-326199laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.671902068Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zob6ujf3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671909407Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zo8jacdq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671781495Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.671680267Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zo8jacdq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671743465Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.671625964Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=656459 slug=activeport t=2024-05-29T13:44:14.671634996Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.67155897Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zo8hgdoi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671587033Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zo8hgdoi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671577193Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zo2ya0lz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671468792Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.671423105Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zo2ya0lz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671436112Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:14.671446381Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:14.671318565Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.671280328Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:14.671305919Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znz1nfak-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.67124402Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znz1nfak-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671209579Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.671113319Z caller=remote_instance_store.go:51 user=265585 slug=engageli msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znz1nfak-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.671050408Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=b35b5ef6317db520 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.670983924Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.67074337s EvaluationString:}]" duration=479.200799ms
+logger=ngalert.state.manager.persist user=265585 slug=engageli t=2024-05-29T13:44:14.67104941Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=265585 slug=engageli instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.671025989Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=265585 slug=engageli instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.671018392Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=265585 slug=engageli instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.670993708Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326198laio1euw1, cloud_platform=AWS, customer_id=C610, env_id=326198, env_name=C610_Telefonica_Prod_U11, env_type=prod, instance=env-326198laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.67094956Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.669128367Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.617563ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znwnsr31-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670813525Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znkllzjx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670694254Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.670666466Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326195laio1euw1, cloud_platform=AWS, customer_id=C610, env_id=326195, env_name=C610_Telefonica_Dev_U11, env_type=dev, instance=env-326195laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.670689849Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znkllzjx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670590623Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.670462662Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.670543409Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znh2wnfa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670456462Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znh2wnfa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670426711Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zng47qa9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670227949Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zng47qa9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670195719Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zng47qa9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670164069Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znfrniev-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.670060888Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-znc9a9bb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.669827475Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zn76sfnc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.669696414Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zn76sfnc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.669651663Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zn3gl0dy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.669441851Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zn3gl0dy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.669431281Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=193577 slug=ronbrian instance= t=2024-05-29T13:44:14.669452294Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326160laio1use2, cloud_platform=AWS, customer_id=C512, env_id=326160, env_name=C512 BFS PROD, env_type=prod, instance=env-326160laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.669432364Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=193577 slug=ronbrian t=2024-05-29T13:44:14.66943367Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zmuv9442-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.669245499Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.669208236Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.669116989Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.668992048Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080,
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zmir0t5s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.668979416Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326153laio2euw1, cloud_platform=AWS, customer_id=C426, env_id=326153, env_name=C426_Prod_Parallel, env_type=prod, instance=env-326153laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.669012632Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.668940888Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zmir0t5s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.668931946Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zmir0t5s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.668879625Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=714577 slug=readypactest t=2024-05-29T13:44:14.668776282Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:14.668833275Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zmir0t5s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.668837405Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-326153laio1euw1, cloud_platform=AWS, customer_id=C426, env_id=326153, env_name=C426_Prod_Parallel, env_type=prod, instance=env-326153laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.668737007Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.668770154Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.668751461Z level=debug msg="Execution keep last state is Normal" handler=resultNormal +logger=ngalert.state.manager user=250150 
slug=bizagi instance= t=2024-05-29T13:44:14.668756523Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.668710799Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=901230 slug=integromonitor instance= t=2024-05-29T13:44:14.668617968Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=901230 slug=integromonitor t=2024-05-29T13:44:14.668483243Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325984laio1eastus2, cloud_platform=Azure, customer_id=A221, env_id=325984, env_name=a221_LV_Prod, env_type=prod, instance=env-325984laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.668458737Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325984laio1eastus2, cloud_platform=Azure, customer_id=A221, env_id=325984, env_name=a221_LV_Prod, env_type=prod, instance=env-325984laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.668440624Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zm63cxu6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66834934Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.668298774Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.668297342Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.66822712Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zm6252vz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.668181988Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.668041646Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zm21djm9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.667973566Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.66793742Z 
caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325887laio2use1, cloud_platform=AWS, customer_id=C494, env_id=325887, env_name=C494 CINCH PROD, env_type=prod, instance=env-325887laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.667885991Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zlsq6ixi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.667890205Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.667845621Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +level=debug ts=2024-05-29T13:44:14.667804865Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.667716486Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zlmx5ayn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.667648043Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zlmx5ayn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.667579382Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.667599149Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325887laio1use1, cloud_platform=AWS, customer_id=C494, env_id=325887, env_name=C494 CINCH PROD, env_type=prod, instance=env-325887laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.667655413Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.667574289Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.667489022Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-zlhn7np1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.667505941Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.66750168Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zlhn7np1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66740674Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.667397662Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.667061257Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.667104339Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=196013 slug=inmediasoftware version=83 fingerprint=d18a4a6c4d7a1ad6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.66689952Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=cloudwatch_exporter_scrape_error, env=production, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=cloudwatch_exporter_scrape_error, env=production, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper Value:0xc00ff13b30} B:{Var:B Labels:__name__=cloudwatch_exporter_scrape_error, env=production, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper Value:0xc00ff13a40} C:{Var:C Labels:__name__=cloudwatch_exporter_scrape_error, env=production, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper Value:0xc00ff13a90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.666468569s EvaluationString:[ var='A' labels={__name__=cloudwatch_exporter_scrape_error, env=production, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper} value=0 ], [ var='B' labels={__name__=cloudwatch_exporter_scrape_error, env=production, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper} value=0 ], [ var='C' labels={__name__=cloudwatch_exporter_scrape_error, env=production, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper} value=0 ]}]" duration=13.58471ms +logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=yYVf2qD4z, ref_id=A" t=2024-05-29T13:44:14.667024331Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.666978893Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=yYVf2qD4z, ref_id=A" t=2024-05-29T13:44:14.666983506Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData +logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=yYVf2qD4z, ref_id=A" t=2024-05-29T13:44:14.666970833Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zlg1zwf5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.666944596Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=yYVf2qD4z, ref_id=A" t=2024-05-29T13:44:14.666954783Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zlemns01-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.666901955Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=496fe532ecf781d1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.66676958Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=yYVf2qD4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.666295515s EvaluationString:}]" duration=134.456341ms +level=debug ts=2024-05-29T13:44:14.666731003Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zlemns01-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.666697693Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325788laio1use1, cloud_platform=AWS, customer_id=C656, env_id=325788, env_name=C656 HarperCollins PROD, env_type=prod, instance=env-325788laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.666670445Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:14.666658407Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.217049ms +level=debug ts=2024-05-29T13:44:14.666567506Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.666691043Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.66657237Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" 
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zl8777fw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.666471761Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325778laio1apn1, cloud_platform=AWS, customer_id=C443, env_id=325778, env_name=C443_TAKEDA_TEST, env_type=test, instance=env-325778laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.666373711Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.666320301Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.666186788Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:14.666199298Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zl7o9fc0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.666264919Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zl7o9fc0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.666195568Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=20177 slug=paddledash instance= t=2024-05-29T13:44:14.66615371Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325777laio2apn1, cloud_platform=AWS, customer_id=C443, env_id=325777, env_name=C443_TAKEDA_PROD, env_type=prod, instance=env-325777laio2apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.666162762Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.666093467Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.666149508Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-zl7o9fc0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.666110967Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.666078054Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zl6fxcqd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665982896Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325777laio1apn1, cloud_platform=AWS, customer_id=C443, env_id=325777, env_name=C443_TAKEDA_PROD, env_type=prod, instance=env-325777laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.66596708Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zl38x145-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665882275Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zl1q1wjp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665707263Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zl1q1wjp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665640672Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.665509198Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.665234197Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325772laio2euw1, cloud_platform=AWS, customer_id=C608, env_id=325772, env_name=C608_IPSEN_Prod_2021U11, env_type=prod, instance=env-325772laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.665557136Z level=debug msg="Keeping state" 
state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkwcgksu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66545853Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.665339278Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.665254804Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkwcgksu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665357009Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325772laio1euw1, cloud_platform=AWS, customer_id=C608, env_id=325772, env_name=C608_IPSEN_Prod_2021U11, env_type=prod, instance=env-325772laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.665288406Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkv30b9j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665220958Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=332031 slug=lexisnexisemailage t=2024-05-29T13:44:14.665143778Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.22245ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zktd94sf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665121527Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.665133385Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.663582818Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:14.664903066Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.895906ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zktd94sf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.665010666Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=237629 slug=ocrolus instance= t=2024-05-29T13:44:14.665042289Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zktd94sf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664950395Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.664941913Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkhy4wsr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664883154Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.664869842Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" +level=info ts=2024-05-29T13:44:14.664807029Z caller=remote_alert_sender.go:94 user=233863 slug=rtsystems host=rtsystems-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.31.9:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=gMeopIenk alerts=1 +logger=ngalert.state.manager user=112387 slug=lucidhq instance="datasource_uid=grafanacloud-prom, ref_id=error_rate" t=2024-05-29T13:44:14.664803262Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkgh1jnd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664564111Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.664566424Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkgh1jnd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664554431Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkesq366-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664520031Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkesq366-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66449282Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325615laio1use2, cloud_platform=AWS, customer_id=C512, env_id=325615, env_name=C512 BFS DEV, env_type=dev, instance=env-325615laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.66456637Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkesq366-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664357999Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkcx26a0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664287698Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325612laiouse1, cloud_platform=AWS, customer_id=C494, env_id=325612, env_name=C494 CINCH DEV, env_type=dev, instance=env-325612laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.664266233Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.66422972Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.66416387Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkccqesl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664051426Z level=debug 
msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zkccqesl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.664017656Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.historian backend=loki user=371756 slug=asapp t=2024-05-29T13:44:14.664018939Z level=debug msg="Done saving alert state history batch" +level=debug ts=2024-05-29T13:44:14.663955444Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325602laio1use1, cloud_platform=AWS, customer_id=C745, env_id=325602, env_name=C745 The Hartford QA, env_type=qa, instance=env-325602laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.663987397Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjyvjl61-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.663768513Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjy8ziu6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.663678222Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjy8ziu6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.663652492Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.663593104Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjy8ziu6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66349873Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjwzfx9t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66345788Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjwzfx9t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66342455Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.663352158Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjwzfx9t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.663383559Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.663304135Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjwzfx9t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.663354469Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325588laio1use1, cloud_platform=AWS, customer_id=C745, env_id=325588, env_name=C745 The Hartford Prod, env_type=prod, instance=env-325588laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.663149185Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjrg1l2m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.663163887Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjrg1l2m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.663129547Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325571laio1euw1, cloud_platform=AWS, customer_id=C634, env_id=325571, env_name=C634_KFC_GB_DEV_Parallel, env_type=dev, instance=env-325571laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.662922747Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjrg1l2m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.663053416Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjewlers-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.662937065Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zjcbojkn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.662632681Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.662642132Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.662532193Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=916144 slug=cmjjilpd t=2024-05-29T13:44:14.662445839Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zitb3je9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.6625275Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zitb3je9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66249687Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zitb3je9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66245563Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.662343869Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:14.662233943Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325513laio1euc1, cloud_platform=AWS, customer_id=C640, env_id=325513, env_name=C640 Rinascente DEV, env_type=dev, instance=env-325513laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.662278437Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zin40oxt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.662266908Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zin40oxt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.662238027Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=GNSSFlags" previous_handler=resultNoData t=2024-05-29T13:44:14.662092996Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=GNSSFlags" previous_handler=resultNoData t=2024-05-29T13:44:14.662079996Z level=debug msg="Execution keep last state is Normal" handler=resultNormal +logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=GNSSFlags" t=2024-05-29T13:44:14.662074016Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=GNSSFlags" previous_handler=resultNoData t=2024-05-29T13:44:14.662065366Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=GNSSFlags" previous_handler=resultNoData t=2024-05-29T13:44:14.662056006Z level=debug msg="Execution keep last state is Normal" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ziiz64vo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.662124446Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=656158 slug=muonspacegroundprod t=2024-05-29T13:44:14.662029984Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ziiz64vo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.662046945Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325491laio1use1, cloud_platform=AWS, customer_id=C459, env_id=325491, env_name=C459_AZ_US_UAT, env_type=qa, instance=env-325491laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.662042844Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=174016 slug=journalstaging t=2024-05-29T13:44:14.661970818Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.816668ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ziiz64vo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.662016575Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zifulgu6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.661929564Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=67274f4da3ef79aa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.66190765Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.661626875s EvaluationString:}]" duration=147.758018ms +level=debug ts=2024-05-29T13:44:14.661716632Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.661756004Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=112387 slug=lucidhq version=10 fingerprint=88599f819e4aff89 
attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.661616439Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.661392785s EvaluationString:}]" duration=88.003337ms +logger=ngalert.scheduler user=245291 slug=pismo version=46 fingerprint=a6b6eeaf99171ce6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.661522221Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.66126016s EvaluationString:}]" duration=726.181447ms +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325401laio2euw1, cloud_platform=AWS, customer_id=C608, env_id=325401, env_name=C608_IPSEN_UAT_Parallel, env_type=qa, instance=env-325401laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.661558808Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325401laio1euw1, cloud_platform=AWS, customer_id=C608, env_id=325401, env_name=C608_IPSEN_UAT_Parallel, env_type=qa, instance=env-325401laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.661221361Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=852841 slug=agrivolt t=2024-05-29T13:44:14.658219304Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.15341ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zidiale9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.661095696Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zidiale9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660982435Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.656540791Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=689030 slug=simonsuat t=2024-05-29T13:44:14.660903618Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.494845ms +level=debug ts=2024-05-29T13:44:14.656391529Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.656290437Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-zia7z1qb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660904454Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.654862328Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325400laio1euw1, cloud_platform=AWS, customer_id=C608, env_id=325400, env_name=C608_Ipsen_DEV_2021U11, env_type=dev, instance=env-325400laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.660946369Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325400laio1euw1, cloud_platform=AWS, customer_id=C608, env_id=325400, env_name=C608_Ipsen_DEV_2021U11, env_type=dev, instance=env-325400laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.660855199Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.660762703Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi8h7r2o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660716192Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi8h7r2o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660678191Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi7wgu1m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.66049525Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi7wgu1m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660471669Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.660440797Z 
level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.305504ms +level=debug ts=2024-05-29T13:44:14.660372609Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.660366002Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325389laio2use1, cloud_platform=AWS, customer_id=C348, env_id=325389, env_name=C348 CWHH PROD, env_type=prod, instance=env-325389laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.660392988Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi4oi6iz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660306538Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi4oi6iz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660245607Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.660228956Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.660191775Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi3kxyi1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.660103245Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi16uifg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.659939784Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi16uifg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.659908713Z level=debug msg="Setting next state" 
handler=resultNormal +level=debug ts=2024-05-29T13:44:14.659843641Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi16uifg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.659865393Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325389laio1use1, cloud_platform=AWS, customer_id=C348, env_id=325389, env_name=C348 CWHH PROD, env_type=prod, instance=env-325389laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.659862075Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zi16uifg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.659833213Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhrzukcf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.659651881Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhrzukcf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.659625011Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325358laio1euw1, cloud_platform=AWS, customer_id=C634, env_id=325358, env_name=C634_KFC_GB_TEST_PARALLEL, env_type=test, instance=env-325358laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.659422026Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhmsp5z8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.659197456Z level=debug msg="Setting next state" handler=resultNormal 
+logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:14.659072537Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.892749ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhjya1qb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.658989744Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.658861184Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.658846351Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.315886ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhgcegjy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.658822952Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhf46aqi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.65862757Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhf46aqi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.65860793Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.658648224Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhcz1mrz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.658482759Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zhcz1mrz-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.658447069Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.65838725Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.629731ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zha0lsnt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.658311897Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.658114541Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zha0lsnt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.658162446Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325353laio1use1, cloud_platform=AWS, customer_id=C333, env_id=325353, env_name=c333_Mercer_PROD_U11, env_type=prod, instance=env-325353laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:14.658079355Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zh6gum04-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.658059445Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zh6gum04-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657982974Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zh2zy14z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657803952Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zh2zy14z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657741451Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zh2zy14z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657715791Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgyzkoyy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657679381Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325351laio2use1, cloud_platform=AWS, customer_id=C633, env_id=325351, env_name=C633 Disney PRD U11, env_type=prod, instance=env-325351laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.65767704Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.657571866Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgyzkoyy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.6576536Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.657479092Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgwy8fpr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657474879Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgwy8fpr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657446158Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgwy8fpr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657401678Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgwy8fpr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657308837Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgrg4lzh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657270266Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgrg4lzh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657175825Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=715709 slug=mtbprod t=2024-05-29T13:44:14.657108952Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.714365ms +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325350laio1euw1, cloud_platform=AWS, customer_id=C634, env_id=325350, env_name=C634_KFC_GB_PARALLEL_PROD, env_type=prod, instance=env-325350laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.657163346Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325350laio1euw1, cloud_platform=AWS, customer_id=C634, env_id=325350, env_name=C634_KFC_GB_PARALLEL_PROD, env_type=prod, instance=env-325350laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.657145587Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgrg4lzh-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.657141315Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgh2j5y5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656955993Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.656790946Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgg3yyo0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656770191Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.656733773Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgg3yyo0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656741611Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgg3yyo0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656699941Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325219laio2use1, cloud_platform=AWS, customer_id=C363, env_id=325219, env_name=C363_Lincoln_PRD_2021U11, env_type=prod, instance=env-325219laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.656623011Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.656483749Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgab85m1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656492368Z level=debug msg="Keeping state" 
state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgab85m1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656458678Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgab85m1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656419058Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zgab85m1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656397738Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325219laio1use1, cloud_platform=AWS, customer_id=C363, env_id=325219, env_name=C363_Lincoln_PRD_2021U11, env_type=prod, instance=env-325219laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.656344145Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg8j31dy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656364477Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg8j31dy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656339267Z level=debug msg="Setting next state" handler=resultNormal +level=info ts=2024-05-29T13:44:14.656199103Z caller=remote_alert_sender.go:94 user=471861 slug=planetstaging host=planetstaging-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.255.78:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a19348a8-d822-47d0-bba8-b08a00f4c851 alerts=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-zg8j31dy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656209156Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg562412-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656176735Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325189laio1use1, cloud_platform=AWS, customer_id=C437, env_id=325189, env_name=C437_DPSG_QA, env_type=qa, instance=env-325189laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.656152934Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg562412-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656155415Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg562412-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656125675Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg562412-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656102884Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg562412-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656072314Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg01v01j-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.656021574Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.655957614Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325112laio2use1, cloud_platform=AWS, customer_id=C724, env_id=325112, env_name=C724 Travelers CL Prd, env_type=prod, instance=env-325112laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.655941138Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zg01v01j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655937593Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfzrrxv0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655829022Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfzrrxv0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655728051Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfw1pnrn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655542859Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=538040 slug=dwtsandbox t=2024-05-29T13:44:14.655359365Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=538040 slug=dwtsandbox instance="datasource_uid=1hCz-F14k, ref_id=A" t=2024-05-29T13:44:14.655343357Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=538040 slug=dwtsandbox instance="datasource_uid=1hCz-F14k, ref_id=A" t=2024-05-29T13:44:14.655335796Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=538040 slug=dwtsandbox instance="datasource_uid=1hCz-F14k, ref_id=A" t=2024-05-29T13:44:14.655304011Z level=debug msg="Execution no data state is 
Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=538040 slug=dwtsandbox instance="datasource_uid=1hCz-F14k, ref_id=A" t=2024-05-29T13:44:14.655293873Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfpfbyd9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655400717Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/v1/clearances/{proxy+}, Stage=--" t=2024-05-29T13:44:14.65540185Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=538040 slug=dwtsandbox t=2024-05-29T13:44:14.655273149Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfpfbyd9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655386997Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.655410243Z caller=remote_instance_store.go:51 user=538040 slug=dwtsandbox msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325111laio2use1, cloud_platform=AWS, customer_id=C724, env_id=325111, env_name=C724 Travelers PI Prd, env_type=prod, instance=env-325111laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.655358537Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfpfbyd9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655263116Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfp6p0yq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655226916Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-zfp6p0yq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655133425Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325111laio1use1, cloud_platform=AWS, customer_id=C724, env_id=325111, env_name=C724 Travelers PI Prd, env_type=prod, instance=env-325111laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.655156017Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfp6p0yq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.655082824Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-325052laio1westeurope, cloud_platform=Azure, customer_id=A217, env_id=325052, env_name=a217-NAV-PROD, env_type=prod, instance=env-325052laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.655010046Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.654992893Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfocgmyx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654863402Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfma7y3e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654740701Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324997laio1use1, cloud_platform=AWS, customer_id=C333, env_id=324997, env_name=C333_Mercer_DEV_U11, env_type=dev, instance=env-324997laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:14.654815907Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfma7y3e-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654615449Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfifm1ts-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654576899Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324978laio1euw1, cloud_platform=AWS, customer_id=C394, env_id=324978, env_name=C394_AZ_EU_UAT, env_type=qa, instance=env-324978laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.654482636Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324978laio1euw1, cloud_platform=AWS, customer_id=C394, env_id=324978, env_name=C394_AZ_EU_UAT, env_type=qa, instance=env-324978laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.654464721Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.65439584Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfhfzbtv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654369557Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324960laio1euc1, cloud_platform=AWS, customer_id=C640, env_id=324960, env_name=C640 Rinascente PROD, env_type=prod, instance=env-324960laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.654279684Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfhfzbtv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654226105Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324954laio2use1, cloud_platform=AWS, customer_id=C628, env_id=324954, env_name=C628_MSTR_Autotrial_Prd, env_type=prod, instance=env-324954laio2use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.654172934Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zffv28l5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654143084Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.654066229Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zffv28l5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654070574Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zffv28l5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.654024723Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324954laio1use1, cloud_platform=AWS, customer_id=C628, env_id=324954, env_name=C628_MSTR_Autotrial_Prd, env_type=prod, instance=env-324954laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.654060495Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324954laio1use1, cloud_platform=AWS, customer_id=C628, env_id=324954, env_name=C628_MSTR_Autotrial_Prd, env_type=prod, instance=env-324954laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.654037528Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.654001076Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.653859688Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324907laio1use1, cloud_platform=AWS, customer_id=C437, env_id=324907, env_name=C437_DPSG_DEV, env_type=dev, instance=env-324907laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.653867288Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-LT-STI-REPORT-ACCOUNT-DLQ" t=2024-05-29T13:44:14.653864772Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=114492 slug=railsbank version=8 fingerprint=e9e25f0ff4660ae5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.653718002Z 
level=debug msg="Alert rule evaluated" results="[{Instance:QueueName=PROD-LT-STI-REPORT-ACCOUNT-DLQ State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:QueueName=PROD-LT-STI-REPORT-ACCOUNT-DLQ Value:0xc02ef0bc60} C:{Var:C Labels:QueueName=PROD-LT-STI-REPORT-ACCOUNT-DLQ Value:0xc02ef0bc68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.653424359s EvaluationString:[ var='B' labels={QueueName=PROD-LT-STI-REPORT-ACCOUNT-DLQ} value=0 ], [ var='C' labels={QueueName=PROD-LT-STI-REPORT-ACCOUNT-DLQ} value=0 ]}]" duration=122.939028ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfenodve-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.65369597Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfcy19el-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.653573568Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.653622885Z caller=remote_instance_store.go:51 user=146728 slug=dgc msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=206439 slug=relaypro t=2024-05-29T13:44:14.65365154Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=216.068285ms +level=debug ts=2024-05-29T13:44:14.653530173Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.653494901Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zfcy19el-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.653514788Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324835laio1euw1, cloud_platform=AWS, customer_id=C394, env_id=324835, env_name=C394_AZ_EU_Dev, env_type=dev, instance=env-324835laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.653479192Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zf1b4ah0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.653375976Z level=debug msg="Setting next 
state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zf1b4ah0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.653317786Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=852841 slug=agrivolt instance= t=2024-05-29T13:44:14.651032364Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=852841 slug=agrivolt t=2024-05-29T13:44:14.650985373Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zetea99x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.653192924Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zetea99x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.653117504Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.653075599Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.653037263Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324816laio1euw1, cloud_platform=AWS, customer_id=C426, env_id=324816, env_name=C426_Dev_Parallel, env_type=dev, instance=env-324816laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=Live" t=2024-05-29T13:44:14.653034535Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=412141 slug=sharethrough t=2024-05-29T13:44:14.652994965Z level=debug msg="Saving alert states" count=13 max_state_save_concurrency=1 +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-0b0fbf08195a02043" t=2024-05-29T13:44:14.65293981Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-0b0fbf08195a02043" t=2024-05-29T13:44:14.652919297Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-0a627e454d29bd5e4" t=2024-05-29T13:44:14.652901883Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-0a627e454d29bd5e4" t=2024-05-29T13:44:14.65289673Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy 
instance="__name__=mstr_status_modeling_service, agent_hostname=env-324815laio1use1, cloud_platform=AWS, customer_id=C348, env_id=324815, env_name=NEW_C348_QA_2021, env_type=qa, instance=env-324815laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.652819394Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-047c2cf255a135b43" t=2024-05-29T13:44:14.652829691Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zer4dpqv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652814561Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-03cc5e17c24bbac0f" t=2024-05-29T13:44:14.652759686Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.652684487Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zeq4vgos-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652698849Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=698047 slug=gamesworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.652466163Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-0381a7c0d82ecd74b" t=2024-05-29T13:44:14.652687462Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.652697764Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-022936f373437e361" t=2024-05-29T13:44:14.652669546Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-022936f373437e361" t=2024-05-29T13:44:14.652664196Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412141 slug=sharethrough instance="host_id=i-008a538d06609c4ab" t=2024-05-29T13:44:14.652643412Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.652659109Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324813laio2use1, cloud_platform=AWS, customer_id=C324, env_id=324813, env_name=C324_Marsh_Prod_Parallel, env_type=prod, instance=env-324813laio2use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:14.652648354Z level=debug msg="Setting next state" handler=resultNormal 
+logger=ngalert.state.manager user=689030 slug=simonsuat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.652387982Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zemko1pq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652516577Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324813laio1use1, cloud_platform=AWS, customer_id=C324, env_id=324813, env_name=C324_Marsh_Prod_Parallel, env_type=prod, instance=env-324813laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:14.652488451Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=689030 slug=simonsuat t=2024-05-29T13:44:14.652346862Z level=debug msg="State manager processing evaluation results" resultCount=1 +level=debug ts=2024-05-29T13:44:14.652540972Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:14.652523415Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zemko1pq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652479217Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zemko1pq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652449497Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.652357052Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.652363601Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.652309941Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ze9gk9fs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652293815Z level=debug msg="Setting next state" handler=resultNormal +level=debug 
ts=2024-05-29T13:44:14.65231144Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.652258292Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.652228628Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.652253228Z caller=remote_instance_store.go:51 user=130276 slug=devops8 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=320778 slug=omegaai t=2024-05-29T13:44:14.652244775Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ze9gk9fs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652221674Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ze9gk9fs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652191774Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.65207735Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324775laio1use1, cloud_platform=AWS, customer_id=C639, env_id=324775, env_name=C639 Whole Foods PROD, env_type=prod, instance=env-324775laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.652063664Z level=debug msg="Setting next state" handler=resultNormal +Error parsing panelUID for alert annotationruleID2685dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=320778 slug=omegaai version=1 fingerprint=d8713c61cd309fbe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.647591917Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.643514035s EvaluationString:}]" duration=80.587464ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ze46mbj1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.652015602Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.652010635Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=a27390229606e6ed attempt=1 
now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.651915939Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.651615895s EvaluationString:}]" duration=197.685477ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ze080jd2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.651948182Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ze080jd2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.65180259Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ze080jd2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.65177245Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zdvxta7x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.651733289Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324751laio1euw1, cloud_platform=AWS, customer_id=C754, env_id=324751, env_name=C754_Societe_General_PROD, env_type=prod, instance=env-324751laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=preprod" t=2024-05-29T13:44:14.651730585Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.651574293Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:14.65156444Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.651528088Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324739laio1eastus, cloud_platform=Azure, customer_id=A177, env_id=324739, env_name=A177 Telefonica Prod, env_type=prod, instance=env-324739laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.651504843Z level=debug msg="Keeping 
state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zdku6f2c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.651496587Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zdku6f2c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.651454886Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324739laio1eastus, cloud_platform=Azure, customer_id=A177, env_id=324739, env_name=A177 Telefonica Prod, env_type=prod, instance=env-324739laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.651415992Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zdku6f2c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.651362045Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zdghcpi6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.651279355Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=457025 slug=truta t=2024-05-29T13:44:14.651259719Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=65.10832ms +logger=ngalert.state.manager.persist user=82372 slug=fout t=2024-05-29T13:44:14.647249269Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=78.895115ms +logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=stWgXyV7z, ref_id=C" t=2024-05-29T13:44:14.651157746Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.651049667Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324738laio1eastus, cloud_platform=Azure, customer_id=A177, env_id=324738, env_name=A177 Telefonica Dev, env_type=dev, instance=env-324738laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.651091126Z level=debug 
msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.651079079Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zdfjjy0j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.651040562Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.650904591Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zdfjjy0j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.650901491Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.650715077Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324711laio2use1, cloud_platform=AWS, customer_id=C680, env_id=324711, env_name=C680 FIS PROD U11, env_type=prod, instance=env-324711laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.650682291Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd68hic1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.650548017Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd4f3ufb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.650432496Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd4f3ufb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.650363795Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd4f3ufb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.650341505Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324700laio1use1, cloud_platform=AWS, customer_id=C511, env_id=324700, env_name=C511 LIBERTY DEV, env_type=dev, instance=env-324700laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.650336032Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd1ngfot-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.650090812Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd1ngfot-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.650020162Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd1ngfot-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649992711Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zd1ngfot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649953251Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zczlpryi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64988082Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zczlpryi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649699408Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zcyutkdo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649661808Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zcyutkdo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649631858Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zcyutkdo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649589247Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zcyutkdo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649562267Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zcyutkdo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649532607Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zcwyhss7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649450666Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324641laio1cac1, cloud_platform=AWS, customer_id=C482, 
env_id=324641, env_name=C482 AZGA DEV, env_type=dev, instance=env-324641laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:14.649984056Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.649897672Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zcbn5sa6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.649017741Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=716630 slug=coapdev t=2024-05-29T13:44:14.649607715Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.491729ms +logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:14.649773285Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.668402ms +level=debug ts=2024-05-29T13:44:14.649609371Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.649606926Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.64962413Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324624laio1usw2, cloud_platform=AWS, customer_id=C753, env_id=324624, env_name=C753 COX DMS+ Non-Prod, env_type=dev, instance=env-324624laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.649640703Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.649433439Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.649435399Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324599laio1euw2, cloud_platform=AWS, customer_id=C751, env_id=324599, env_name=C751_Dev_mmflowers_U11, env_type=dev, instance=env-324599laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:14.649270257Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=174016 slug=journalstaging t=2024-05-29T13:44:14.649150447Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=174016 slug=journalstaging instance= t=2024-05-29T13:44:14.649121254Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.649020279Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324598laio1use1, cloud_platform=AWS, customer_id=C633, env_id=324598, env_name=C633 Disney SBX U11, 
env_type=sandbox, instance=env-324598laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.649090626Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.648951423Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324580laioeuc1, cloud_platform=AWS, customer_id=C746, env_id=324580, env_name=C746 SAINT HERBLAIN U11, env_type=prod, instance=env-324580laioeuc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.648880841Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zc0eu4qp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648652478Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zc0eu4qp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648614477Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zc0eu4qp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648532376Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.64852911Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.64847439Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbz8v5j4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648464156Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbz8v5j4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648361595Z 
level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbytc8zj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648268144Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.648270403Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324568laio2westeurope, cloud_platform=Azure, customer_id=A184, env_id=324568, env_name=A184 NEW CSI PROD, env_type=prod, instance=env-324568laio2westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.648297537Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.6482037Z caller=remote_instance_store.go:51 user=542095 slug=intelligencefusion msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.648235803Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.648208159Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbytc8zj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648206633Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbytc8zj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648164413Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.648088026Z caller=remote_instance_store.go:51 user=290313 slug=replit msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbxha2ir-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648098712Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.648060675Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbxha2ir-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.648069132Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.648054322Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.647920335Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324568laio1westeurope, cloud_platform=Azure, customer_id=A184, env_id=324568, env_name=A184 NEW CSI PROD, env_type=prod, instance=env-324568laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.647970843Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.647912975Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbxha2ir-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647960641Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbxha2ir-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64793357Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbqq16gy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64786746Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.647814634Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbqq16gy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647836899Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-zbqq16gy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647741898Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.647751662Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=EMA9p0nVk, ref_id=A" t=2024-05-29T13:44:14.647736706Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.64769687Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=430961 slug=solifi version=5 fingerprint=9634c4339ecad31c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.64759972Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=EMA9p0nVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.647277879s EvaluationString:}]" duration=35.197921ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbpuhnfl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647685428Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbol1wsq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647480766Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.647461768Z caller=remote_instance_store.go:51 user=715709 slug=mtbprod msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.647465381Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbol1wsq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647428405Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbk6fyrv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647343484Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.scheduler user=715709 slug=mtbprod version=1 fingerprint=19e713de6c7d2877 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.647276165Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.647098084s EvaluationString:}]" duration=6.765346ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbhwmnk0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.647187443Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.647169227Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.647145847Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324503laio1use1, cloud_platform=AWS, customer_id=C740, env_id=324503, env_name=C740 ICBC Dev, env_type=dev, instance=env-324503laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.647073997Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.647059919Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbh8ii4b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64696977Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbh8ii4b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64691286Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbh8ii4b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.646843949Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=307001 slug=hirerightdev t=2024-05-29T13:44:14.646796733Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.409596ms +level=debug ts=2024-05-29T13:44:14.646554302Z 
caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbehahrn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.646505646Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbehahrn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.646455035Z level=debug msg="Setting next state" handler=resultNormal
+level=info ts=2024-05-29T13:44:14.646273087Z caller=grafana.go:247 user=391538 slug=risknarrative msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=noData&state=error" groups=7 alerts=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbehahrn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.646424085Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324489laio1aps2, cloud_platform=AWS, customer_id=C730, env_id=324489, env_name=C730 NEW UoA DEV, env_type=dev, instance=env-324489laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:14.646345644Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.646305347Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324489laio1aps2, cloud_platform=AWS, customer_id=C730, env_id=324489, env_name=C730 NEW UoA DEV, env_type=dev, instance=env-324489laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:14.646329593Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.646274903Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=150145 slug=pleasant instance= t=2024-05-29T13:44:14.646258237Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.646224188Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbcsfc3q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.646210933Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324488laio1use1, cloud_platform=AWS, customer_id=C633, env_id=324488, env_name=C633 Disney DEV U11, env_type=dev, instance=env-324488laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.646179237Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=163513 slug=dialpad version=7 fingerprint=dcfbc8f3d9c9b424 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.646100871Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.645637443s EvaluationString:}]" duration=560.222388ms
+logger=ngalert.state.manager.persist user=231061 slug=teamaround t=2024-05-29T13:44:14.646019015Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbcmhsjq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.646019581Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbc8kd6i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64598386Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbc8kd6i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64595489Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbc8kd6i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64591339Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zbc8kd6i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.645818229Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zb6tb545-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.645762448Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324483laio1use1, cloud_platform=AWS, customer_id=C631, env_id=324483, env_name=C631 The Bay QA, env_type=qa, instance=env-324483laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.645714606Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324480laio1euc1, cloud_platform=AWS, customer_id=C654, env_id=324480, env_name=C654 NEW Test Swift Iris, env_type=test, instance=env-324480laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.64552348Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324480laio1euc1, cloud_platform=AWS, customer_id=C654, env_id=324480, env_name=C654 NEW Test Swift Iris, env_type=test, instance=env-324480laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.645497865Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.645498584Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:14.645452292Z level=debug msg="Saving alert states" count=11 max_state_save_concurrency=1
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-procfeed" t=2024-05-29T13:44:14.645444851Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-procfeed" t=2024-05-29T13:44:14.645392917Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-01-procfeed" t=2024-05-29T13:44:14.645375032Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-01-procfeed" t=2024-05-29T13:44:14.645368573Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-02-procfeed" t=2024-05-29T13:44:14.645354375Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zb6ntvhu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.645387314Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-02-procfeed" t=2024-05-29T13:44:14.645299296Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324479laio1cac1, cloud_platform=AWS, customer_id=C720, env_id=324479, env_name=C720 Trader DEV, env_type=dev, instance=env-324479laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:14.645283625Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.644951798Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=472647 slug=planet version=361 fingerprint=5fe47fa9287d9949 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.645003072Z level=debug msg="Alert rule evaluated" results="[{Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-qe-ov2-default-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-qe-ov2-default-procfeed Value:0xc02297d118} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-qe-ov2-default-procfeed Value:0xc02297d180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644450863s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-qe-ov2-default-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-qe-ov2-default-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-02-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-02-procfeed Value:0xc02297d290} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-02-procfeed Value:0xc02297d298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644465849s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-02-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-02-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-01-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-01-procfeed Value:0xc02297d2f0} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-01-procfeed Value:0xc02297d2c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644472071s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-01-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-01-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-02-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-02-procfeed Value:0xc02297d328} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-02-procfeed Value:0xc02297d320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644478181s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-02-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-02-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-01-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-01-procfeed Value:0xc02297d358} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-01-procfeed Value:0xc02297d3a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644485632s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-01-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-dflt-01-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-02-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-02-procfeed Value:0xc02297d3d0} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-02-procfeed Value:0xc02297d3d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644492026s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-02-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-02-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-01-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-01-procfeed Value:0xc02297d450} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-01-procfeed Value:0xc02297d418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644497854s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-01-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-02-ov2-alt-01-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-procfeed Value:0xc02297d490} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-procfeed Value:0xc02297d498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644510119s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-default-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-02-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-02-procfeed Value:0xc02297d4c8} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-02-procfeed Value:0xc02297d500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644516415s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-02-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-02-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-01-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-01-procfeed Value:0xc02297d540} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-01-procfeed Value:0xc02297d548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644521138s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-01-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-01-procfeed} value=0 ]} {Instance:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-procfeed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-procfeed Value:0xc02297d588} C:{Var:C Labels:metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-procfeed Value:0xc02297d5b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.644525385s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-procfeed} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_aggregate, resource.label.subscription_id=next-ov2-alt-procfeed} value=0 ]}]" duration=123.327638ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaykrzgi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.645118321Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaykrzgi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64500662Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324475laio1use1, cloud_platform=AWS, customer_id=C511, env_id=324475, env_name=C511 LIBERTY PROD, env_type=prod, instance=env-324475laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.644860922Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaxtz9mr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644833378Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.644738346Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaxtz9mr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644775298Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaw7vpho-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644615656Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaw7vpho-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644550186Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.644510229Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zatyb7eb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644416044Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zatyb7eb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644373274Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaoxr3sa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644298423Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaoxr3sa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644266563Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaoxr3sa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644219022Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaoxr3sa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644189012Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zaoxr3sa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.644149001Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324403laio1euc1, cloud_platform=AWS, customer_id=C654, env_id=324403, env_name=C654 New QA Swift Iris, env_type=qa, instance=env-324403laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.644247892Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zan18mjx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64404185Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.644120769Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-zadtdzuz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643874829Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-za59luld-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643706157Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-za59luld-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643687997Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.644045074Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-za59luld-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643644336Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-za59luld-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643607906Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9yhftrq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643514515Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.643539765Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.643840898Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324303laio2use1, cloud_platform=AWS, customer_id=C744, env_id=324303, env_name=C744 Bayer Prod, env_type=prod, instance=env-324303laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.643872605Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:14.643834416Z caller=grafana.go:247 user=224047 slug=ppbtradingtribeprd msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=72 alerts=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9yhftrq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643450874Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324303laio1use1, cloud_platform=AWS, customer_id=C744, env_id=324303, env_name=C744 Bayer Prod, env_type=prod, instance=env-324303laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.643658043Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:14.643585905Z caller=remote_alert_sender.go:94 user=337951 slug=pawapay host=pawapay-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.224.250:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=L0phcGv/ alerts=1
+level=debug ts=2024-05-29T13:44:14.64354458Z caller=remote_instance_store.go:51 user=444725 slug=devnextgen msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=444725 slug=devnextgen instance= t=2024-05-29T13:44:14.643449594Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.643442471Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.643450668Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.566302ms
+logger=ngalert.state.manager.persist user=713314 slug=tpceunonprod t=2024-05-29T13:44:14.643180265Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9wt6iv9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643297563Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9wt6iv9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643259832Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=337951 slug=pawapay t=2024-05-29T13:44:14.643221053Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.724534ms
+logger=ngalert.scheduler user=444725 slug=devnextgen version=1 fingerprint=7ce9b28539c1151b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.643194975Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.642921161s EvaluationString:}]" duration=367.09301ms
+level=debug ts=2024-05-29T13:44:14.643129565Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.643174505Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9vvr16m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.643199172Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=417450 slug=legitsecurity t=2024-05-29T13:44:14.643153087Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=417450 slug=legitsecurity instance= t=2024-05-29T13:44:14.643138416Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.643087108Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=417450 slug=legitsecurity instance= t=2024-05-29T13:44:14.643127824Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9vvr16m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64304963Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324104laio2use1, cloud_platform=AWS, customer_id=C481, env_id=324104, env_name=C481 Pfizer PROD, env_type=prod, instance=env-324104laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.643038894Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.642964131Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.46565ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-324104laio2use1, cloud_platform=AWS, customer_id=C481, env_id=324104, env_name=C481 Pfizer PROD, env_type=prod, instance=env-324104laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.643020151Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9v9hk52-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642952179Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9v9hk52-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642901249Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.642837862Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.642549038Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9rrze19-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642557345Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9rrze19-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642492154Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.642483052Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.642463371Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:46:50Z next_ends_at=2024-05-29T13:47:50Z
+level=debug ts=2024-05-29T13:44:14.642433686Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9rrze19-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642425364Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9qgj0hb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642383523Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9qgj0hb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642356703Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.642313363Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9qgj0hb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642217442Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.642171081Z caller=remote_instance_store.go:51 user=158536 slug=clearsaleantifraude msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.642132329Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="name=redpanda-broker-client, redpanda_id=cp5srjuvd5pm5ndt9he0" t=2024-05-29T13:44:14.642145639Z level=warn msg="Failed to take an image" dashboard=2vSQVvSVz panel=35 error="rpc error: code = Code(422) desc = screenshots unavailable"
+level=debug ts=2024-05-29T13:44:14.642059375Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:14.642031342Z caller=grafana.go:247 user=224047 slug=ppbtradingtribeprd msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=85 alerts=0
+logger=ngalert.state.manager user=716630 slug=coapdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.642072936Z level=debug msg="Setting next state" handler=resultNoData
+level=info ts=2024-05-29T13:44:14.642117498Z caller=remote_image_capturer.go:61 user=183214 slug=vectorizedio rule_org_id=1 rule_uid=DWoJZKS4k dashboard=2vSQVvSVz panel=35 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=716630 slug=coapdev t=2024-05-29T13:44:14.642054586Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9h2w391-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642151661Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=233863 slug=rtsystems t=2024-05-29T13:44:14.642105352Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9h2w391-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.642111221Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z9h2w391-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64207414Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=233863 slug=rtsystems instance= t=2024-05-29T13:44:14.64205708Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+logger=ngalert.state.manager.persist user=84360 slug=sib t=2024-05-29T13:44:14.642050886Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.80586ms
+logger=ngalert.scheduler user=716630 slug=coapdev version=1 fingerprint=22e8c4410d0c55f3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.641984195Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.641746852s EvaluationString:}]" duration=9.591644ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323970laio1use1, cloud_platform=AWS, customer_id=C586, env_id=323970, env_name=C586 VNSNY PROD, env_type=prod, instance=env-323970laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.642081529Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.642050427Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=233863 slug=rtsystems t=2024-05-29T13:44:14.642014044Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323970laio1use1, cloud_platform=AWS, customer_id=C586, env_id=323970, env_name=C586 VNSNY PROD, env_type=prod, instance=env-323970laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.642036469Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.641976488Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z95zym3k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641956059Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=224047 slug=ppbtradingtribeprd version=56 fingerprint=7ceb8abe0ce3b39b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.641847915Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.641542144s EvaluationString:}]" duration=990.581926ms
+level=debug ts=2024-05-29T13:44:14.641823884Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z95zym3k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641847948Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z95ga6f4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641747737Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z95ga6f4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641694396Z level=debug msg="Setting next state" handler=resultNormal
+level=info ts=2024-05-29T13:44:14.641637416Z caller=remote_alert_sender.go:94 user=230713 slug=flocksafety host=flocksafety-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.22.91:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c4349e43-99b3-4e09-88af-732e3acfb2b0 alerts=1
+level=info ts=2024-05-29T13:44:14.641519677Z caller=remote_alert_sender.go:94 user=230713 slug=flocksafety host=flocksafety-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.182.88:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c4349e43-99b3-4e09-88af-732e3acfb2b0 alerts=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z90u8owo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641484744Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8x9ytbt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641280742Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8x9ytbt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641241972Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8x9ytbt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.641216021Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.641188284Z caller=remote_instance_store.go:51 user=344017 slug=descript msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8touk6i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640999199Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8touk6i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640932678Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=332031 slug=lexisnexisemailage t=2024-05-29T13:44:14.640913846Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.640773817Z caller=remote_instance_store.go:51 user=701741 slug=thetradingpitproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=344017 slug=descript instance="tenancy=production" t=2024-05-29T13:44:14.640862541Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.64078592Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8q49xsv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640780907Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323771laio1use2, cloud_platform=AWS, customer_id=C571, env_id=323771, env_name=C571 PROD Ventura Foods, env_type=prod, instance=env-323771laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.640707536Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.640603709Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8g7j04n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640586445Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=ioFV1Jn4z, ref_id=A" t=2024-05-29T13:44:14.64051477Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8g7j04n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640475304Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.640506668Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8d1ypqx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640361423Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z8d1ypqx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640262292Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z88goy7w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.640171381Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323590laio1use1, cloud_platform=AWS, customer_id=C656, env_id=323590, env_name=C656 HarperCollins DEV, env_type=dev, instance=env-323590laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.640140498Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.640087093Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z88goy7w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.64007014Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.640012995Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.639968987Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7ywu361-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639973929Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=435206 slug=kkrprivateuat t=2024-05-29T13:44:14.639904027Z level=debug msg="Skip rule evaluation because it is paused"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323452laio2use1, cloud_platform=AWS, customer_id=C437, env_id=323452, env_name=C437_DPSG_PROD, env_type=prod, instance=env-323452laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.639946265Z level=debug msg="Setting next state" handler=resultNormal
+Error parsing panelUID for alert annotationruleID2733dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=269887 slug=blackrockdev t=2024-05-29T13:44:14.639890752Z level=debug msg="Saving alert states done" count=20 max_state_save_concurrency=1 duration=1.488643878s
+level=debug ts=2024-05-29T13:44:14.639834518Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.639729498Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323452laio1use1, cloud_platform=AWS, customer_id=C437, env_id=323452, env_name=C437_DPSG_PROD, env_type=prod, instance=env-323452laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.63977733Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7vm484q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639779677Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7vm484q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639755636Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7vm484q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639703776Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7sx24bk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639625465Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323431laio1euc1, cloud_platform=AWS, customer_id=C535, env_id=323431, env_name=C535_HRS_Prod, env_type=prod, instance=env-323431laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.639590038Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7sx24bk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639472774Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7n7ljt6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639194671Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7n7ljt6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.639074909Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.639070629Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:14.639102341Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance= t=2024-05-29T13:44:14.639088522Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7k7h9pf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638994869Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7k7h9pf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638967358Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7k7h9pf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638926868Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.638879593Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.63880881Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7k7h9pf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638739106Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.635585532Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7ggkiyi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638655015Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7ggkiyi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638644225Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z7ggkiyi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638613325Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z79ckbdv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics"
t=2024-05-29T13:44:14.638396222Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z79ckbdv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638330022Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323160laio1use2, cloud_platform=AWS, customer_id=C571, env_id=323160, env_name=C571 DEV Ventura Foods, env_type=dev, instance=env-323160laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.638258092Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323160laio1use2, cloud_platform=AWS, customer_id=C571, env_id=323160, env_name=C571 DEV Ventura Foods, env_type=dev, instance=env-323160laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.638243702Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z77fv0pv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.63819871Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.638139969Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z77fv0pv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.63815965Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.638094372Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z75q3xbu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.638064549Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z72wbaix-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.637906807Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z72wbaix-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.637804956Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323027laio1use2, cloud_platform=AWS, customer_id=C657, env_id=323027, env_name=C657 Cardinal Invest PROD, env_type=prod, instance=env-323027laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.637788923Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-323027laio1use2, cloud_platform=AWS, customer_id=C657, env_id=323027, env_name=C657 Cardinal Invest PROD, env_type=prod, instance=env-323027laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.637741825Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z72ewwqr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.637625454Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z72ewwqr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.637596584Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.637616325Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z71oztpb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.637485003Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.637419843Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.637383926Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6xegs09-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.637255311Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6xegs09-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.63719044Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6sphv4s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.637083959Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322731laio1use1, cloud_platform=AWS, customer_id=C626, env_id=322731, env_name=C626 Shoe Carnival PROD, env_type=prod, instance=env-322731laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.637046088Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6sphv4s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636948737Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322731laio1use1, cloud_platform=AWS, customer_id=C626, env_id=322731, env_name=C626 Shoe Carnival PROD, env_type=prod, instance=env-322731laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.637030222Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6rctjk8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636884417Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-z6rctjk8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636814766Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.636811355Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322729laio1cac1, cloud_platform=AWS, customer_id=C636, env_id=322729, env_name=C636_DEV_GoEasy_Parallel, env_type=dev, instance=env-322729laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:14.636847469Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322688laio1euw1, cloud_platform=AWS, customer_id=C454, env_id=322688, env_name=C454_AZ_UK_EXP_U10, env_type=dev, instance=env-322688laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.636645659Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6m6596r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636558243Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.636487694Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6jlcrbx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636522063Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z6jlcrbx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636490963Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322672laio2euw1, cloud_platform=AWS, customer_id=C454, env_id=322672, env_name=C454_AZ_UK_Prod_U10, env_type=prod, instance=env-322672laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.63645627Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-z6frxfd3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636167859Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.636127037Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.636110738Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.636121572Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=sase-common-dp" t=2024-05-29T13:44:14.636043811Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=sase-common-dp" t=2024-05-29T13:44:14.63603292Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z65nubyi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636082008Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z65nubyi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636044018Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z65nubyi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.636016548Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=prometheus" t=2024-05-29T13:44:14.63595042Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=prometheus" t=2024-05-29T13:44:14.635936559Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z65nubyi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635984367Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z65gf1cn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635937387Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.635872241Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z65gf1cn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635842006Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=cert-manager" t=2024-05-29T13:44:14.635806219Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.635780134Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322648laio1aps2, cloud_platform=AWS, customer_id=C671, env_id=322648, env_name=C671_Kmart_DEV, env_type=dev, instance=env-322648laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:14.635821165Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z65gf1cn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635798255Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.635744164Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=calico-system" t=2024-05-29T13:44:14.635765397Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=argocd" t=2024-05-29T13:44:14.635743068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=amazon-cloudwatch" t=2024-05-29T13:44:14.635727077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z60ofb35-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635546393Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z60ofb35-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635503362Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z60ofb35-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635443412Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.635457794Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z60ofb35-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635419092Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322500laio1usw2, cloud_platform=AWS, customer_id=C723, env_id=322500, env_name=C723 DirecTV Prod, env_type=prod, instance=env-322500laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:14.63561885Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.635530058Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.63552799Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.635511836Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322497laio1use1, cloud_platform=AWS, customer_id=C442, env_id=322497, env_name=C442_NCB_Prod, env_type=prod, instance=env-322497laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.635381257Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.635366546Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z60aq2de-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635386651Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z60aq2de-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.63524721Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z60aq2de-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635218279Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.635133573Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=uiNClwN7k, ref_id=A" t=2024-05-29T13:44:14.635115859Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5skx0xs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.635052798Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322458laioapn1, cloud_platform=AWS, customer_id=C735, env_id=322458, env_name=C735 Toyota MP, env_type=prod, instance=env-322458laioapn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.635099315Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.635043932Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5s2lb4e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634974767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5s2lb4e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634840516Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy 
instance="__name__=mstr_status_modeling_service, agent_hostname=env-322447laio1use1, cloud_platform=AWS, customer_id=C486, env_id=322447, env_name=C486_Cox_Manheim_SUP, env_type=qa, instance=env-322447laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.634949072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322447laio1use1, cloud_platform=AWS, customer_id=C486, env_id=322447, env_name=C486_Cox_Manheim_SUP, env_type=qa, instance=env-322447laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.634929677Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.634825684Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5pdgdjx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634798365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5pdgdjx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634712644Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.634674234Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322445laio2euw1, cloud_platform=AWS, customer_id=C394, env_id=322445, env_name=C394_AZ_EU_PROD, env_type=prod, instance=env-322445laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.634770217Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5pdgdjx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634696414Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322445laio2euw1, cloud_platform=AWS, customer_id=C394, env_id=322445, env_name=C394_AZ_EU_PROD, env_type=prod, instance=env-322445laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.634760767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5pdgdjx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634630503Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.634515227Z caller=remote_instance_store.go:51 user=111653 slug=theassociationmxp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:14.634458266Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322287laiosouthcentralus, cloud_platform=Azure, customer_id=A207, env_id=322287, env_name=A207 7Eleven Mex PROD, env_type=prod, instance=env-322287laiosouthcentralus, job=integrations/node_exporter, region=southcentralus, stage=live" t=2024-05-29T13:44:14.634443143Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:14.634420724Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:14.63439883Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5iek53q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634415711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z5iek53q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.63433817Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z534b8vw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634231529Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322273laio1euw2, cloud_platform=AWS, customer_id=C684, env_id=322273, env_name=C684_NHS_GGC_dev_new, env_type=dev, instance=env-322273laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:14.634275915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z534b8vw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634201069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=pop-001-ae1" t=2024-05-29T13:44:14.634167047Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.634046598Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z51l665x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634095628Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.63406176Z caller=remote_instance_store.go:51 user=130276 slug=devops8 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z51l665x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.634030727Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=pop-001-ae1" t=2024-05-29T13:44:14.634008955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322262laio2use1, cloud_platform=AWS, customer_id=C486, env_id=322262, env_name=C486_Cox_Manheim_Prod, env_type=prod, instance=env-322262laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.634036827Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.633878375Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4uj1o3m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633916666Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4uj1o3m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.633895316Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4uj1o3m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633865846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4uj1o3m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633843915Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.633774396Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4nrugrz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633777365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4nrugrz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633727764Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.633746306Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4nrugrz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633704034Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.633680694Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4mtk7vf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633538282Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4mtk7vf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633505722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4gvm9sp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633445451Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4gvm9sp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633416661Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.633440486Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.633332255Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=ads-nc8z" t=2024-05-29T13:44:14.633269939Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4flpdk4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633241299Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z4flpdk4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.633189509Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.632898108Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z49fhhyw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.632858885Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.632883928Z caller=remote_instance_store.go:51 user=230713 slug=flocksafety msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.632455853Z caller=remote_image_capturer.go:33 user=337951 slug=pawapay rule_org_id=1 rule_uid=L0phcGv/ msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+level=debug ts=2024-05-29T13:44:14.632810008Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z3o27x70-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.632761774Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.632313805Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:14.632737386Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.244.39:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edbhspyypxj40c alerts=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z3o27x70-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.632717634Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.632678706Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.632661758Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322234laio1euw2, cloud_platform=AWS, customer_id=C684, env_id=322234, env_name=C684_NHS_GGC_prod_new, env_type=prod, instance=env-322234laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:14.632627898Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=337951 slug=pawapay instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.632436621Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=337951 slug=pawapay instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.632419753Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:14.632626184Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z37j0iky-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.632629523Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=230713 slug=flocksafety version=40 fingerprint=cb8ee4d310e032db attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.632495014Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.632153085s EvaluationString:}]" duration=81.108017ms
+logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.632292972Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=ads-1fxg" t=2024-05-29T13:44:14.632142473Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322218laio1use1, cloud_platform=AWS, customer_id=C737, env_id=322218, env_name=C737 Road Scholar Dev, env_type=dev, instance=env-322218laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.632436556Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.632325328Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.632325328Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.632187736Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.632125111Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.632156277Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=trading-service, filename=/tmp/logs/trading-service/trading-service-1, job=default, log_level=WARN, service_name=default" t=2024-05-29T13:44:14.632230531Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z33gdxev-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.632162248Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.632088163Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=trading-service, filename=/tmp/logs/trading-service/trading-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.632143939Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z33gdxev-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.632089387Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:14.63202671Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.210954ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322143laio1usw2, cloud_platform=AWS, customer_id=C678, env_id=322143, env_name=C678_DR_PARALLEL, env_type=prod, instance=env-322143laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.632099789Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=trading-service, filename=/tmp/logs/trading-service/trading-service-1, job=default, log_level=INFO, service_name=default" t=2024-05-29T13:44:14.632042817Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.631618738Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z33cx3ci-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631974636Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z33cx3ci-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631918116Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z33cx3ci-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631891205Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=quote-service, filename=/tmp/logs/quote-service/quote-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.631887833Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z337hn5y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631827745Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=350551 slug=loopme t=2024-05-29T13:44:14.631768238Z level=debug msg="State manager processing evaluation results" resultCount=6
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=quote-service, filename=/tmp/logs/quote-service/quote-service-1, job=default, log_level=INFO, service_name=default" t=2024-05-29T13:44:14.631799471Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z337hn5y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631701743Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=product-service, filename=/tmp/logs/product-service/product-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.631710199Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=product-service, filename=/tmp/logs/product-service/product-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.631687388Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z32luvz7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631557922Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322081laio1use1, cloud_platform=AWS, customer_id=C459, env_id=322081, env_name=C459_AZ_US_PROD, env_type=prod, instance=env-322081laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.631553589Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=product-service, filename=/tmp/logs/product-service/product-service-1, job=default, log_level=INFO, service_name=default" t=2024-05-29T13:44:14.631485713Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z32luvz7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631492111Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=product-service, filename=/tmp/logs/product-service/product-service-1, job=default, log_level=INFO, service_name=default" t=2024-05-29T13:44:14.631471863Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z31cmawu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631444241Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z31cmawu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.63141708Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=307001 slug=hirerightdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.631335416Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z31cmawu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631308579Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322060laio2use1, cloud_platform=AWS, customer_id=C628, env_id=322060, env_name=C628_MSTR_DEMO_PROD, env_type=prod, instance=env-322060laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.631318031Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.631280952Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322060laio2use1, cloud_platform=AWS, customer_id=C628, env_id=322060, env_name=C628_MSTR_DEMO_PROD, env_type=prod, instance=env-322060laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.631300435Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.631268397Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.631238793Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.631221523Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2uznk4b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631069297Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.631093598Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2lp43h8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.631026206Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.630943106Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.630939089Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.630995371Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.630919442Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:14.630881743Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.027621ms
+level=debug ts=2024-05-29T13:44:14.630852995Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322014laio1use1, cloud_platform=AWS, customer_id=C675, env_id=322014, env_name=C675 Rag & Bone - PROD, env_type=prod, instance=env-322014laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.630856761Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322014laio1use1, cloud_platform=AWS, customer_id=C675, env_id=322014, env_name=C675 Rag & Bone - PROD, env_type=prod, instance=env-322014laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.63083853Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2k3e5om-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.630696963Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=notification-service, filename=/tmp/logs/notification-service/notification-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.630717335Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=notification-service, filename=/tmp/logs/notification-service/notification-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.630684784Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322006laio1use1, cloud_platform=AWS, customer_id=C736, env_id=322006, env_name=C736 Education Sandbox, env_type=prod, instance=env-322006laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.630642648Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-322006laio1use1, cloud_platform=AWS, customer_id=C736, env_id=322006, env_name=C736 Education Sandbox, env_type=prod, instance=env-322006laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.630628393Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2k3e5om-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.630478881Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=gtm-tracking-service, filename=/tmp/logs/gtm-tracking-service/gtm-tracking-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.630427768Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2ixk5po-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.63035464Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=314067 slug=itsme t=2024-05-29T13:44:14.630363859Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.63024687Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2ixk5po-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.630321389Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.630214637Z caller=remote_instance_store.go:51 user=213445 slug=gan msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=document-service, filename=/tmp/logs/document-service/document-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.630218113Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=contract-service, filename=/tmp/logs/contract-service/contract-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.63010887Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321923laio1euw1, cloud_platform=AWS, customer_id=C637, env_id=321923, env_name=C637 Pfizer EU Prod U10, env_type=prod, instance=env-321923laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.630070819Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2f4imou-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.630034886Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-logs, ref_id=Query" t=2024-05-29T13:44:14.630048074Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=contract-service, filename=/tmp/logs/contract-service/contract-service-1, job=default, log_level=INFO, service_name=default" t=2024-05-29T13:44:14.630039238Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.629946936Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:14.629924949Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=37.92756ms
+level=debug ts=2024-05-29T13:44:14.629924157Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=client-service, filename=/tmp/logs/client-service/client-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.629868494Z level=warn msg="Failed to take an image" dashboard=dd7d56ea-ce50-4efa-9ea1-17aab54a8d45 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.629902604Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=177453 slug=clabs t=2024-05-29T13:44:14.629844496Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+level=info ts=2024-05-29T13:44:14.629821343Z caller=remote_image_capturer.go:61 user=701741 slug=thetradingpitproduction rule_org_id=1 rule_uid=daf567bc-81ba-4637-949a-2690e4d5b1b5 dashboard=dd7d56ea-ce50-4efa-9ea1-17aab54a8d45 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=177453 slug=clabs instance="datasource_uid=pvsyCDT4k, ref_id=A" t=2024-05-29T13:44:14.629833266Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.629803443Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.629722474Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=175748 slug=skyspecs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.629767956Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=177453 slug=clabs t=2024-05-29T13:44:14.629738786Z level=debug msg="State manager processing evaluation results" resultCount=2
+logger=ngalert.state.manager user=175748 slug=skyspecs t=2024-05-29T13:44:14.629742251Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=177453 slug=clabs version=15 fingerprint=cb5acfbd651f9bb6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.629641696Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=pvsyCDT4k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.629296033s EvaluationString:} {Instance:datasource_uid=ZeGX0STVz, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.629300499s EvaluationString:}]" duration=306.575448ms
+logger=ngalert.scheduler user=175748 slug=skyspecs version=1 fingerprint=9637b3cbd41c58e7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.629656488Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.629297729s EvaluationString:}]" duration=42.083783ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321794laio1use1, cloud_platform=AWS, customer_id=C724, env_id=321794, env_name=C724 Travelers CL Dev, env_type=dev, instance=env-321794laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.629708435Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321794laio1use1, cloud_platform=AWS, customer_id=C724, env_id=321794, env_name=C724 Travelers CL Dev, env_type=dev, instance=env-321794laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.629691906Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z2c69hn3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.629600192Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.629526327Z caller=remote_instance_store.go:51 user=698103 slug=vericast msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.629471304Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1z0cma8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.629297189Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1z0cma8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.629267788Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1z0cma8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.629253018Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.629146368Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1tyxr48-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.629161047Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321784laio1use1, cloud_platform=AWS, customer_id=C318, env_id=321784, env_name=C318 Kohls PROD, env_type=prod, instance=env-321784laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.629129216Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.629069747Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1tyxr48-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.629017706Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:14.623977976Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.872922ms
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.629021625Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=info ts=2024-05-29T13:44:14.624141888Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.74.54:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edlo3ua9ez0n4e alerts=1
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.628995702Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.628955052Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.628922684Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=430961 slug=solifi version=5 fingerprint=27868b22a0e10ffe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.628855383Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.628483995s EvaluationString:}]" duration=72.222449ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321783laio1use1, cloud_platform=AWS, customer_id=C485, env_id=321783, env_name=C485 Ralph Lauren SBX, env_type=sandbox, instance=env-321783laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.628948535Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321782laio1use1, cloud_platform=AWS, customer_id=C485, env_id=321782, env_name=C485 Ralph Lauren DEV, env_type=dev, instance=env-321782laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.628797013Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.628781665Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.416578ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321780laio1use1, cloud_platform=AWS, customer_id=C318, env_id=321780, env_name=C318 Kohls QA, env_type=qa, instance=env-321780laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.628649895Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1s5qd4z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.628607542Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1s5qd4z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.628594722Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1qm4eiz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.628351939Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1pixwdy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.628273728Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1pixwdy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.628263048Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=checkout-service, filename=/tmp/logs/checkout-service/checkout-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.628369578Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321713laio1use1, cloud_platform=AWS, customer_id=C724, env_id=321713, env_name=C724 Travelers PI DISC, env_type=disc, instance=env-321713laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.628277284Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=affiliate-service, filename=/tmp/logs/affiliate-service/affiliate-service-1, job=default, log_level=WARN, service_name=default" t=2024-05-29T13:44:14.628205774Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1n2ex72-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.628181167Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=701741 slug=thetradingpitproduction instance="application_name=affiliate-service, filename=/tmp/logs/affiliate-service/affiliate-service-1, job=default, log_level=TRACE, service_name=default" t=2024-05-29T13:44:14.628107292Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321618laio2use1, cloud_platform=AWS, customer_id=C485, env_id=321618, env_name=C485 Ralph Lauren PROD, env_type=prod, instance=env-321618laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.628029693Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1eh0zo3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.627947025Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321618laio1use1, cloud_platform=AWS, customer_id=C485, env_id=321618, env_name=C485 Ralph Lauren PROD, env_type=prod, instance=env-321618laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.627867101Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.62774092Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z1asvf6d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.627671672Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321542laio1use1, cloud_platform=AWS, customer_id=C442, env_id=321542, env_name=C442_NCB_Dev, env_type=dev, instance=env-321542laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.627550388Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z15pt3bd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.62742609Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.627137722Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z15pt3bd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.627363269Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321501laio1use2, cloud_platform=AWS, customer_id=C734, env_id=321501, env_name=C734 GoEasy DR, env_type=prod, instance=env-321501laio1use2, job=integrations/node_exporter, region=us-east-2, stage=preprod" t=2024-05-29T13:44:14.627312652Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z15pt3bd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.627320068Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z14zeaqv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.627260488Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z14zeaqv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.627230988Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z0xauoat-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.627053546Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z0xauoat-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.626973185Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.6267378Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z0rlc2cb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.626665042Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.626606835Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.626437402Z caller=remote_instance_store.go:51 user=202755 slug=iwmedia msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.626435092Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.626409284Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.626356663Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.626353988Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.626250249Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=843304 slug=ppcgroup t=2024-05-29T13:44:14.626081386Z level=debug msg="Skip rule evaluation because it is paused"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z0mvpe1q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.626226277Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z0gejvhs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.626173317Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z0gejvhs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.626163647Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z0gejvhs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.626079186Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z041gcld-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.626018485Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z041gcld-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.625943504Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.62575643Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:14.625717893Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=39.238346ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321163laio1use1, cloud_platform=AWS, customer_id=C647, env_id=321163, env_name=C647 Psycho Bunny PROD, env_type=prod, instance=env-321163laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.625731143Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-z01i3rl9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.625688082Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzzfkic7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.625589591Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.625573387Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.625554264Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.660148ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321155laio1use1, cloud_platform=AWS, customer_id=C522, env_id=321155, env_name=C522_Stuller_Prod, env_type=prod, instance=env-321155laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.625570713Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.625441878Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=201644 slug=thoughtspot t=2024-05-29T13:44:14.625320016Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.150877ms
+level=debug ts=2024-05-29T13:44:14.625364955Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzyqu6vl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.625264497Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.625210497Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=716519 slug=bradfordprod t=2024-05-29T13:44:14.625071986Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.022301ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzwxzky4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.625125176Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=713315 slug=mtbnonprod t=2024-05-29T13:44:14.625040646Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.22257ms
+logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.625200036Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321097laiouse2, cloud_platform=AWS, customer_id=C503, env_id=321097, env_name=C503 Van Andel PROD, env_type=prod, instance=env-321097laiouse2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.625085767Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.625018692Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.625031799Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzuheq9t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624971274Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzuheq9t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624919034Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321093laio1use1, cloud_platform=AWS, customer_id=C705, env_id=321093, env_name=C705 Cox Ecommerce Prod, env_type=prod, instance=env-321093laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.624771113Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yztf6ytj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624742162Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yztf6ytj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624624821Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.624544289Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.894954ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321092laio1use1, cloud_platform=AWS, customer_id=C705, env_id=321092, env_name=C705 Cox Ecommerce Dev, env_type=dev, instance=env-321092laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.624586841Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321092laio1use1, cloud_platform=AWS, customer_id=C705, env_id=321092, env_name=C705 Cox Ecommerce Dev, env_type=dev, instance=env-321092laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.62457199Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yztf6ytj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.62455625Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321044laiouse2, cloud_platform=AWS, customer_id=C503, env_id=321044, env_name=C503 Van Andel DEV, env_type=dev, instance=env-321044laiouse2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.624394136Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:14.624342616Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cdbhspyh8n402c alerts=1
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321010laio1use1, cloud_platform=AWS, customer_id=C491, env_id=321010, env_name=C491_Prod, env_type=prod, instance=env-321010laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.624230286Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-321010laio1use1, cloud_platform=AWS, customer_id=C491, env_id=321010, env_name=C491_Prod, env_type=prod, instance=env-321010laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.624216079Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzo0zr13-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624259267Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzo0zr13-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624221907Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.624198557Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzncs9fp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624119076Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzncs9fp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.624051115Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320949laio2use1, cloud_platform=AWS, customer_id=C660, env_id=320949, env_name=C660 V.P. Prod, env_type=prod, instance=env-320949laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.624066374Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzkxt17i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623998714Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.623916065Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.623851316Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzkxt17i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623882133Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320920laio1use1, cloud_platform=AWS, customer_id=C423, env_id=320920, env_name=C423 MMRIET PROD, env_type=prod, instance=env-320920laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.623650694Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzhsof8t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623485119Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzhsof8t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623455879Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.623395323Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzdg3mlx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623416068Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzdg3mlx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623390138Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320825laio1use2, cloud_platform=AWS, customer_id=C581, env_id=320825, env_name=C581 - ISG - PROD, env_type=prod, instance=env-320825laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.623387334Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.623193854Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yzdg3mlx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623287727Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.623125364Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yz7fc2et-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.623137996Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.623011569Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320754laio1use1, cloud_platform=AWS, customer_id=C664, env_id=320754, env_name=C664 NonProd Disney Studi, env_type=dev, instance=env-320754laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.623063788Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320754laio1use1, cloud_platform=AWS, customer_id=C664,
env_id=320754, env_name=C664 NonProd Disney Studi, env_type=dev, instance=env-320754laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.623049094Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yz65djab-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.622944954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yz65djab-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.622914633Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320741laio1euw1, cloud_platform=AWS, customer_id=C637, env_id=320741, env_name=C637 Pfizer EU QA U10, env_type=qa, instance=env-320741laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.622894172Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.622865765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yz2t1all-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.622786532Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.6227849Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yz2t1all-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.622699611Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=191103 slug=amazonadmin version=60 fingerprint=3efae7909b10a600 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.622719779Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.622461371s EvaluationString:}]" duration=300.307255ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yz28a2k4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.62255385Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yz26ccib-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.622450959Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320677laio1euw1, cloud_platform=AWS, customer_id=C452, env_id=320677, env_name=C452_DEV_2021_U10, env_type=dev, instance=env-320677laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.622494251Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320662laio2euw1, cloud_platform=AWS, customer_id=C452, env_id=320662, env_name=C452_Preprod_2021_U10, env_type=test, instance=env-320662laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.622306177Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.622274211Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query" t=2024-05-29T13:44:14.62225151Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320662laio1euw1, cloud_platform=AWS, customer_id=C452, env_id=320662, env_name=C452_Preprod_2021_U10, env_type=test, instance=env-320662laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.622139699Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyy77zgy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.622188746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyy77zgy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.622126155Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320510laio1euw1, cloud_platform=AWS, customer_id=C693, env_id=320510, env_name=C693_Gate_Gourmet_ProdU10, env_type=prod, instance=env-320510laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.621515405Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320510laio1euw1, cloud_platform=AWS, customer_id=C693, env_id=320510, env_name=C693_Gate_Gourmet_ProdU10, env_type=prod, instance=env-320510laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.621483897Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.621910192Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320362laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=320362, env_name=C559_SWIFT_TEST_U10, env_type=prod, instance=env-320362laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.620898754Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.621910693Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320341laio2euc1, cloud_platform=AWS, customer_id=C559, env_id=320341, env_name=C559_SWIFT_PROD_U10, env_type=prod, instance=env-320341laio2euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.620651107Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320341laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=320341, env_name=C559_SWIFT_PROD_U10, env_type=prod, instance=env-320341laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.620453879Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320270laio1use2, cloud_platform=AWS, customer_id=C580, env_id=320270, env_name=C580_TCS_Prod, env_type=prod, instance=env-320270laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.619969027Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.621815992Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yywq1585-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621942053Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 
slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320159laio1use1, cloud_platform=AWS, customer_id=C680, env_id=320159, env_name=C680 FIS UAT U11, env_type=qa, instance=env-320159laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.619764018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyv6dkyn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621909853Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320159laio1use1, cloud_platform=AWS, customer_id=C680, env_id=320159, env_name=C680 FIS UAT U11, env_type=qa, instance=env-320159laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.619753147Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320109laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=320109, env_name=C559_SWIFT_QA_U10, env_type=prod, instance=env-320109laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.619223833Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320109laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=320109, env_name=C559_SWIFT_QA_U10, env_type=prod, instance=env-320109laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:14.619213945Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320098laio1use1, cloud_platform=AWS, customer_id=C632, env_id=320098, env_name=C632 TATA DEV, env_type=dev, instance=env-320098laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.619051913Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-320097laio1use1, cloud_platform=AWS, customer_id=C632, env_id=320097, env_name=C632 TATA PROD, env_type=prod, instance=env-320097laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.618894711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyv6dkyn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621800442Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.621735487Z caller=remote_instance_store.go:51 
user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319839laio1use1, cloud_platform=AWS, customer_id=C313, env_id=319839, env_name=C313 DeRoyal PROD, env_type=prod, instance=env-319839laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.617969703Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:14.621746444Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.816108ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319740laio1use1, cloud_platform=AWS, customer_id=C711, env_id=319740, env_name=C711 Disney CP Dev, env_type=dev, instance=env-319740laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.61757933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyopv9le-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621719491Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyopv9le-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621689241Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.620030082Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyopv9le-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.62163096Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.621612484Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyopv9le-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.6216214Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-yugalabs-justice-prod-justice-002, 
environment_name=yugalabs-justice-prod" t=2024-05-29T13:44:14.621540388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yyf0v1qt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621454748Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy3io7vg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621365257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-yugalabs-justice-prod-justice-001, environment_name=yugalabs-justice-prod" t=2024-05-29T13:44:14.621417099Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy3io7vg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621301657Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy25ytzd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621234426Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy25ytzd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621195056Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=695888 slug=boeingdr t=2024-05-29T13:44:14.6212036Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.667604ms + level=debug ts=2024-05-29T13:44:14.621201534Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy1ml6wr-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.621101315Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-tripwire-justice-prod-justice-001, environment_name=tripwire-justice-prod" t=2024-05-29T13:44:14.621147012Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.620871554Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.620956242Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=475799 slug=dpdcz t=2024-05-29T13:44:14.620931324Z level=debug msg="Saving alert states done" count=34 max_state_save_concurrency=1 duration=565.460171ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy0zi9nt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620943553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy0zi9nt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620870482Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy0zi9nt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620836762Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-tripwire-justice-prod-dsmc-0002-002, environment_name=tripwire-justice-prod" t=2024-05-29T13:44:14.620859485Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yy0zi9nt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620793982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:14.620641641Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-yxzloa3p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620584289Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:14.620286135Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-tripwire-justice-prod-dsmc-0001-002, environment_name=tripwire-justice-prod" t=2024-05-29T13:44:14.620468251Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.620386233Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxtwhg6p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620377197Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxr17kuq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620346707Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxr17kuq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620334767Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.62025789Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxr17kuq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620266086Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.620201472Z caller=grafana.go:247 user=447873 slug=pn0625test01 msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=VRxT0ojZz" groups=0 alerts=0 + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.620259548Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxpj3awq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620135675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-tripwire-justice-prod-dsmc-0001-001, environment_name=tripwire-justice-prod" t=2024-05-29T13:44:14.620124611Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxpj3awq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.620044494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxnlq7br-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.619868732Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=74ec5b8f83d5a68c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.619813471Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.619585169s EvaluationString:}]" duration=174.273678ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxnlq7br-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.619859022Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.619746345Z caller=remote_image_capturer.go:33 user=426229 slug=accelbyte rule_org_id=1 rule_uid=a953fa1f-eaed-4249-8dca-5bedd27d2eba msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.61967958Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.619551583Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-theorycraft-justice-prod-justice-001, environment_name=theorycraft-justice-prod" t=2024-05-29T13:44:14.619577155Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-telltale-justice-prod-justice-001, environment_name=telltale-justice-prod" t=2024-05-29T13:44:14.619452513Z level=debug msg="Keeping state" 
state=Normal + level=debug ts=2024-05-29T13:44:14.619355116Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxjdq4co-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.619216365Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yxjdq4co-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.619176515Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.618600161Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywpiawax-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.619105234Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywpiawax-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.619077574Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.616817055Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.618929148Z caller=grafana.go:247 user=447873 slug=pn0625test01 msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=VRxT0ojZz" groups=0 alerts=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywpiawax-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.619034694Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-starbreeze-pd3-armada-green-2-cluster-0002-002, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.619077133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywn3r2w8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.618897622Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.618889851Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-starbreeze-pd3-armada-green-2-cluster-0002-001, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.618949107Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-starbreeze-pd3-armada-green-2-cluster-0002-001, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.618936225Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywn3r2w8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.618866552Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.618647583Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-starbreeze-pd3-armada-green-2-cluster-0001-001, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.618696563Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywn3r2w8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61866622Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-starbreeze-payday3-prod-435-armada-001-002, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.618593148Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-starbreeze-payday3-prod-435-armada-001-002, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.618578551Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=695885 slug=lululemonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.618443583Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-starbreeze-payday3-prod-435-armada-001-001, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.618466131Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=134486 slug=podigee t=2024-05-29T13:44:14.618376038Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=41.802637ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywk9me67-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.618383117Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.618330074Z caller=remote_instance_store.go:51 user=84360 slug=sib msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywguf3b8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.618254265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ywguf3b8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.618195265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=84360 slug=sib t=2024-05-29T13:44:14.618131631Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.61801634Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-resolution-justice-prod-justice-002, environment_name=resolution-justice-prod" t=2024-05-29T13:44:14.618079048Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=618024 slug=aptosdev instance= t=2024-05-29T13:44:14.617853746Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.617928646Z caller=remote_instance_store.go:51 user=618024 slug=aptosdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-nerial-justice-prod-justice-002, environment_name=nerial-justice-prod" t=2024-05-29T13:44:14.617855075Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yw6pituz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.617804911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yw6pituz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61773589Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.617735479Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-nerial-justice-prod-justice-001, environment_name=nerial-justice-prod" t=2024-05-29T13:44:14.617718603Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yw6pituz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61769341Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yw38xlkf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.617507868Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-neonmachine-justice-prod-justice-001, environment_name=neonmachine-justice-prod" t=2024-05-29T13:44:14.617490454Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-loadtest-justice-prod-justice-002, environment_name=loadtest-justice-prod" t=2024-05-29T13:44:14.617384682Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-loadtest-justice-prod-justice-002, environment_name=loadtest-justice-prod" t=2024-05-29T13:44:14.617371512Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=713315 slug=mtbnonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.616798705Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yw01qke3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.617265955Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-loadtest-justice-prod-justice-001, environment_name=loadtest-justice-prod" t=2024-05-29T13:44:14.617271293Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yvznolme-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.617143984Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.616883246Z caller=remote_instance_store.go:51 user=713315 slug=mtbnonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-genpop-justice-prod-justice-001, environment_name=genpop-justice-prod" t=2024-05-29T13:44:14.616981175Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yvx85lue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616909772Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.616911623Z caller=remote_instance_store.go:51 user=320778 slug=omegaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yvx85lue-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616847691Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=320778 slug=omegaai instance="datasource_uid=fd517b64-917d-450a-97e0-ad3c0d31f9e1, ref_id=A" t=2024-05-29T13:44:14.616831964Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=320778 slug=omegaai instance="datasource_uid=fd517b64-917d-450a-97e0-ad3c0d31f9e1, ref_id=A" t=2024-05-29T13:44:14.616820324Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.61678116Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yvrvhxft-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616661059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yvi8ffx8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616589318Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-foundations-justice-dev-justice-002, environment_name=foundations-justice-dev" t=2024-05-29T13:44:14.616566232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yvi8ffx8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616520627Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yvi8ffx8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616466897Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-foundations-justice-dev-justice-001, environment_name=foundations-justice-dev" t=2024-05-29T13:44:14.616457661Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yv2z7264-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616437697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319692laio1cac1, cloud_platform=AWS, customer_id=C629, env_id=319692, env_name=C629 Flow PROD, env_type=prod, instance=env-319692laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:14.616386895Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-ssn-ntv-0003-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.616353675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.616271999Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.616162003Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.616223073Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=245291 slug=pismo version=8 fingerprint=5bbded3ce492f85c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.616166404Z level=debug msg="Alert rule 
evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[C0:{Var:C Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.615931956s EvaluationString:[ var='C0' metric='NoData' labels={} value=null ]}]" duration=160.345311ms + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-ssn-ntv-0002-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.616122928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-ssn-ntv-0002-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.616106689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yv0tipmr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.616061513Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319688laio1use1, cloud_platform=AWS, customer_id=C558, env_id=319688, env_name=C558_Covanta_PROD_U10, env_type=prod, instance=env-319688laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.616036627Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.615996712Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.61590283Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-ssn-ntv-0001-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.615951683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuynq3cu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61584635Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuxoy1y7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61581466Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.615860099Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuxoy1y7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61580436Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.61586186Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319579laio2use1, cloud_platform=AWS, customer_id=C711, env_id=319579, env_name=C711 Disney CP Prod, env_type=prod, instance=env-319579laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.615829351Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.615786304Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.61577791Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuxoy1y7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615666069Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuxctpsq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615556477Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuxctpsq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615489707Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-social-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.615515745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuxctpsq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615459016Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:14.615431855Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-session-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.615342132Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.615323321Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yux2s222-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615361315Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yux2s222-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615351855Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.615374946Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yux2s222-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615291525Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yutexb6s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615258754Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yutexb6s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.615232794Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-session-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.615083864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yun6cfyv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614983222Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319571laio1use1, cloud_platform=AWS, customer_id=C711, env_id=319571, env_name=C711 Disney CP UAT, env_type=qa, instance=env-319571laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.614899464Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yulqqgtp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614722169Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-platform-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.614748666Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.612435919Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319522laio1euw2, cloud_platform=AWS, customer_id=C578, env_id=319522, env_name=C578_SLC_Prod_U10, env_type=prod, instance=env-319522laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:14.614683765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yulqqgtp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614637738Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuk7lsi1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614587307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-platform-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.614609521Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=517596 slug=datar t=2024-05-29T13:44:14.614489641Z 
level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.13599ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuk7lsi1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614516997Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuk7lsi1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614474536Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319437laio1use1, cloud_platform=AWS, customer_id=C558, env_id=319437, env_name=C558_Covanta_Dev_u10, env_type=dev, instance=env-319437laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.614500722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuk7lsi1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614447566Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yue8miu6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614393295Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yue8miu6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614360685Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yue8miu6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614348705Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-platform-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.614400754Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319432laio1euw3, cloud_platform=AWS, customer_id=C573, env_id=319432, env_name=C573_DEV_Printemps, env_type=dev, instance=env-319432laio1euw3, job=integrations/node_exporter, region=eu-west-3, stage=live" t=2024-05-29T13:44:14.614313464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-platform-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.614326244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuctqaxa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614193343Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yuctqaxa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.614139013Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319426laio1euw2, cloud_platform=AWS, customer_id=C578, env_id=319426, env_name=C578_SLC_Dev_U10, env_type=dev, instance=env-319426laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:14.614136122Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-mmv2-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.614147587Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-mmv2-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.614073383Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.61396454Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yu4ekae0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613944791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ytys1upq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61383955Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-mmv2-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.613916124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ytys1upq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613768689Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319398laio1eastus, cloud_platform=Azure, customer_id=A140, env_id=319398, env_name=A140 OBE DEV, env_type=dev, instance=env-319398laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.613748774Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.613709527Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yttfjwtq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613628148Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yttfjwtq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613513456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-lobby-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.613515715Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.613221244Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ytj6imr1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613418855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ytj6imr1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613372325Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319120laio1euw3, cloud_platform=AWS, customer_id=C573, env_id=319120, env_name=C573_PROD_Printemps, env_type=prod, instance=env-319120laio1euw3, job=integrations/node_exporter, region=eu-west-3, stage=live" t=2024-05-29T13:44:14.613345888Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=714711 slug=nomiai t=2024-05-29T13:44:14.613172533Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.271128ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ytj6imr1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613311144Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-lobby-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.613285124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.613193544Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.613116954Z caller=remote_instance_store.go:51 user=202755 slug=iwmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.613177323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ytg1uc6e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613147943Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.613082212Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=679029 slug=joveoprodaws version=14990 fingerprint=933c2f7cc19c5a48 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.612975911Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.612464075s EvaluationString:}]" duration=643.246801ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ytg1uc6e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.613097822Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.613094284Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.613055426Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yt9uaqni-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612980121Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-319022laio2use1, cloud_platform=AWS, customer_id=C577, env_id=319022, env_name=C577 CBrands QA, env_type=qa, instance=env-319022laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.613016052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ysyk2u6a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612765369Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ysyk2u6a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612712378Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:14.612779555Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.392291ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318931laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318931, env_name=C577 CBrands DEV, env_type=dev, instance=env-318931laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.612639402Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yswuf4cc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612542436Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.612408823Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318919laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318919, env_name=C577 CBrands SANDBOX, env_type=sandbox, instance=env-318919laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.61248088Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-legal-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.612482526Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318919laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318919, env_name=C577 CBrands SANDBOX, env_type=sandbox, instance=env-318919laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.612461153Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yswuf4cc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612418145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yssdel2u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612373715Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.612150614Z caller=remote_instance_store.go:51 user=884866 slug=cnonumerique msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ysep6evm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612234093Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ysep6evm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612211153Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.612254064Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=884866 slug=cnonumerique version=96 fingerprint=483b2774a7cb30a2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.611857588Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdhk917z41xj4a, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.608452868s EvaluationString:}]" duration=13.090425ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318834laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318834, env_name=C577 CBrands EXTERNAL, env_type=other, instance=env-318834laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.612218506Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.61210629Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ysep6evm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612140982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ysep6evm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.612037641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318756laio2use1, cloud_platform=AWS, customer_id=C577, env_id=318756, env_name=C577 CBrands PROD, env_type=prod, instance=env-318756laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.612026595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-justice-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.611988886Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.612001078Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ys8wxlxb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61195504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ys8wxlxb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61190595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318756laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318756, env_name=C577 CBrands PROD, env_type=prod, instance=env-318756laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.61184492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ys0vjmcz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611804789Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-justice-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.611780414Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ys0vjmcz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611749608Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318655laio1euw1, cloud_platform=AWS, customer_id=C600, env_id=318655, env_name=C600_Celio_Dev, env_type=dev, instance=env-318655laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.61166841Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318655laio1euw1, cloud_platform=AWS, customer_id=C600, env_id=318655, env_name=C600_Celio_Dev, env_type=dev, instance=env-318655laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.611651793Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-iam-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.611662426Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ys0bqx0y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611570167Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=695888 slug=boeingdr t=2024-05-29T13:44:14.611532755Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.611470775Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=695888 slug=boeingdr instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.611506595Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=695888 slug=boeingdr instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.611493344Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.611479319Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=695888 slug=boeingdr t=2024-05-29T13:44:14.611479254Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-iam-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.611557012Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.611434976Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318650laio2euw1, cloud_platform=AWS, customer_id=C600, env_id=318650, env_name=C600_Celio_Prod, env_type=prod, instance=env-318650laio2euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.611439063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-dsmc-0002-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.611448606Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yryhs618-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611436735Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yryhs618-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611307174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-dsmc-0002-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.611339746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yryhs618-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611231173Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrq8hvc3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611177492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrq8hvc3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611161722Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrq8hvc3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611122432Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.611083852Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrq8hvc3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.611047031Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-dsmc-0001-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.611070402Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.611022835Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.610911594Z caller=remote_instance_store.go:51 user=472647 slug=planet 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrnris3p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61093338Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrnris3p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.61090072Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-dshub-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.610863204Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.610773573Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrav62jy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610736568Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrav62jy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610699618Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yrav62jy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610596457Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:14.610576627Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.234632ms + level=debug ts=2024-05-29T13:44:14.610524415Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318374laio1euw1, cloud_platform=AWS, customer_id=C422, env_id=318374, env_name=C422_CEVA_PROD_U10, env_type=prod, 
instance=env-318374laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.61047631Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.610460535Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yr8hfj9o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610393234Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yr6915bk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610311864Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-basic-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.610358646Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.610337636Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.610260741Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yr6915bk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610252793Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-analytics-002, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.61018945Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.602517676Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.61020204Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2" t=2024-05-29T13:44:14.610193714Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.610189875Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.610112317Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.598290324Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.610150307Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.598207812Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.610105608Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.610015767Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqsnf2kb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610071331Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-analytics-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.610085902Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqsnf2kb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.610026401Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-stage-analytics-001, environment_name=firewalk-justice-stage" t=2024-05-29T13:44:14.610071962Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.610007414Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqsnf2kb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60999608Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.609948842Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.609970157Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.609946349Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.609928471Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.609917401Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-ssn-ntv-0003-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.609924027Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.609956056Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.609942067Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqs9iyyg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.609852849Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318286laio2use1, cloud_platform=AWS, customer_id=C487, env_id=318286, env_name=C487 Pfizer QA, env_type=qa, instance=env-318286laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.609842677Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqs9iyyg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.609753448Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-ssn-ntv-0002-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.609742696Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqs9iyyg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.609735168Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.609658858Z caller=remote_instance_store.go:51 user=811546 slug=fyld msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqncfokx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.609567556Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqh73n1b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.609475335Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-ssn-ntv-0001-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.609577999Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.609464603Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=691103 slug=caetest t=2024-05-29T13:44:14.60948679Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=18.536115ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqh73n1b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.609434375Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=df74826e5ec518858e9e8b35833f812093bbda80052955f88ff1ffc222c549b6" t=2024-05-29T13:44:14.609401878Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318184laio1use1, cloud_platform=AWS, customer_id=C726, env_id=318184, env_name=C726 MiMedx DEV, env_type=dev, instance=env-318184laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.609421429Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318184laio1use1, cloud_platform=AWS, customer_id=C726, env_id=318184, env_name=C726 MiMedx DEV, env_type=dev, instance=env-318184laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.60940415Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.609163586Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqfqkwq0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.609175362Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqcbvy9r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60900612Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yqcbvy9r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.608931679Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yq8917a0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.608856089Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yq8917a0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.608783778Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yq6zjyp1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.608622176Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yq4n49z1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.608415204Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yq4n49z1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.608352554Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yq09svyc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.608322173Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:14.608743821Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=38.932798ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yppacjii-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60801611Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ypm3fwar-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607918589Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ypkixudd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607750897Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.608676906Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ypbjmxqx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607558085Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ypbjmxqx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607535725Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318112laio2uksouth, cloud_platform=Azure, customer_id=A187, env_id=318112, env_name=A187_Admiral_Prod, env_type=prod, instance=env-318112laio2uksouth, job=integrations/node_exporter, region=uksouth, stage=live" t=2024-05-29T13:44:14.608641589Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-social-0001-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.60863046Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.608568806Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yp1zzf94-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607375634Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-social-0001-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.608358019Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yp1zzf94-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607344633Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318033laio2use1, cloud_platform=AWS, customer_id=C724, env_id=318033, env_name=C724 Travelers BI PROD, env_type=prod, instance=env-318033laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.608277525Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.608200311Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.608142446Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yp1zzf94-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607285543Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yoxbi81s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607228652Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.608085658Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc" t=2024-05-29T13:44:14.608136889Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yoxbi81s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607207122Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.607954754Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-session-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.608088254Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318033laio1use1, cloud_platform=AWS, customer_id=C724, env_id=318033, env_name=C724 Travelers BI PROD, env_type=prod, instance=env-318033laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.608043918Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=a8a95dc728dd98fbb7b70fde1b7ebea1e09cda04c7d6eb56d816bdf626633300" t=2024-05-29T13:44:14.607958538Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-session-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.607921566Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.607835645Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=a3e218a7f9d770845d7c76a2563f55c33458b44b1fe31ecf4b62518e376e4e2a" t=2024-05-29T13:44:14.607860661Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yoxbi81s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607153731Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=a34c7dffa8b6170bb8d196a240f4f69288895958faf45621fe006dcc4bcb8b59" t=2024-05-29T13:44:14.607727339Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yoxbi81s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.607099241Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.607547234Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-playerportal-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.607629202Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.607627934Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=a331d90da71fa43542f58adfa9681763e1d7bdd62497c6d1ef233359a4d67c57" t=2024-05-29T13:44:14.607592504Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.607568149Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yorurg9u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60699482Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=9f7fcfba9d6f3357d511e75eb070f1d9420c6efc204544ef0c63478e10f977ec" t=2024-05-29T13:44:14.607466776Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.607448951Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.607486489Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-playerportal-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.607461863Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yomg1346-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.606875998Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.607285241Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=989467794613a0820338b65b2f8b1725ac50190860d7ae09f9233a2cce053064" t=2024-05-29T13:44:14.607327556Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.607269523Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.232201ms
+ level=debug ts=2024-05-29T13:44:14.607284396Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318031laio1use1, cloud_platform=AWS, customer_id=C724, env_id=318031, env_name=C724 Travelers BI DEV, env_type=dev, instance=env-318031laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.607269821Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-platform-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.607243014Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.60713676Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.607149244Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=8ef894aa6a38dd76614c053719e6c812662fdd93ad1719eba88bdc06e25b4d89" t=2024-05-29T13:44:14.607100971Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.607057319Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.607046512Z caller=remote_instance_store.go:51 user=667326 slug=lakovna msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.607001357Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318030laio1use1, cloud_platform=AWS, customer_id=C724, env_id=318030, env_name=C724 Travelers ES DEV, env_type=dev, instance=env-318030laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.607033159Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.606940433Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=667326 slug=lakovna instance= t=2024-05-29T13:44:14.60694495Z level=warn msg="Failed to take an image" dashboard=d655a547-d823-4496-952e-3d997ba6eacd panel=3 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-mmv2-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.606889044Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:14.606927852Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:14.606855116Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633" t=2024-05-29T13:44:14.606875627Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633" t=2024-05-29T13:44:14.606864154Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yom3sj22-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.606408014Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=7d462c558e8e9b9a25bad82e29a473b8a0e52980eb8047246a78d75c244cd497" t=2024-05-29T13:44:14.606660002Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.606562028Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=7c1d8ec3c21e31df189233b13447234a9a1691c2f6cbbf778e743344b0339daf" t=2024-05-29T13:44:14.606543133Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yoj6dynr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.606153161Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.606447312Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=70b1684f7ca2446a43e5ac14844e7c4a5509bcadede66e5aef8f99c087f62848" t=2024-05-29T13:44:14.606429102Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-adf7416984f24cb4, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:14.606368539Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yogpap5t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60608771Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.606285037Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-lobby-0002-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.606300091Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-lobby-0002-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.606285318Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.606200243Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:14.606111385Z level=debug msg="State manager processing evaluation results" resultCount=9
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yogpap5t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.606001039Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-lobby-0001-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.606132719Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=286924 slug=kmpdashboard t=2024-05-29T13:44:14.605966607Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=65.591632ms
+ level=debug ts=2024-05-29T13:44:14.605912273Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1" t=2024-05-29T13:44:14.60584517Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.605884834Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=667326 slug=lakovna instance= t=2024-05-29T13:44:14.605847707Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-318018laio1apn1, cloud_platform=AWS, customer_id=C704, env_id=318018, env_name=C704_Nitori_Dev, env_type=dev, instance=env-318018laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.605863899Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716527 slug=newpigqa instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.605696016Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.605740555Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.605689667Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.60574032Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df" t=2024-05-29T13:44:14.605720878Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yodypsc6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.605697106Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynutkg0w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.605662756Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=612525 slug=adleyeview version=83 fingerprint=8f79a7a79163a982 attempt=1 now=2024-05-29T13:43:50Z t=2024-05-29T13:44:14.605550769Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc01d8d2968} D:{Var:D Labels: Value:0xc01d8d2970}] EvaluatedAt:2024-05-29 13:43:50 +0000 UTC EvaluationDuration:24.599433185s EvaluationString:[ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=20.77837115s
+ level=debug ts=2024-05-29T13:44:14.605603168Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynutkg0w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.605596205Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.605552998Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=55a96b44e3aeca202cc81612c4e9720496d842b84bf2cda2a600ebb91c51550b" t=2024-05-29T13:44:14.605597913Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynutkg0w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.605518814Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.605402617Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynuhdbwe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.605478964Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=54fa2546f3349c1824839f9f5b93658ea3150c986311391c1f5504097241756c" t=2024-05-29T13:44:14.605489055Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-317998laio1use1, cloud_platform=AWS, customer_id=C724, env_id=317998, env_name=C724 Travelers BI SBX, env_type=sandbox, instance=env-317998laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.605434618Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.605470457Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-justice-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.605401285Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynrduqzw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.605316802Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=515848d38dcad782abc619597f27b7a6cbc918af349bfb7f2010e5018f5829d0" t=2024-05-29T13:44:14.605249646Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-justice-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.605294222Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.605176877Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-justice-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.605277049Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynrduqzw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.605255662Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.60519918Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=63.755403ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-317997laio1use1, cloud_platform=AWS, customer_id=C724, env_id=317997, env_name=C724 Travelers ES SBX, env_type=sandbox, instance=env-317997laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.605216712Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynqmr125-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60509891Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynqmr125-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60503646Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.605001199Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-iam-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.605068504Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynqmr125-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.604960689Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.60493842Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.604783549Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-iam-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.604952772Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynk2egwh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.604835847Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.604743309Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=33a81cc58d3c552250e310ca732323addcb8b9117f281606c1c20fe8d480ad66" t=2024-05-29T13:44:14.604889657Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.604849159Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-317680laio1use1, cloud_platform=AWS, customer_id=C726, env_id=317680, env_name=C726 MiMedx Prod, env_type=prod, instance=env-317680laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.604794476Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-gdpr-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.604811955Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=3373237f38d9511b349ef4df71e8af0a7e93dc7355db8b61664ef3cd7d8c9b4b" t=2024-05-29T13:44:14.60479155Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.604751048Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.60467254Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.604618989Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.604661966Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.003653ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=333737f428bc5cf3b213baa78317cc384fea1a5f83e2c407f4cc00fee76bb82b" t=2024-05-29T13:44:14.604678928Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynhp764f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.604556525Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynhp764f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.604520764Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=333737f428bc5cf3b213baa78317cc384fea1a5f83e2c407f4cc00fee76bb82b" t=2024-05-29T13:44:14.604666852Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynhp764f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.604484674Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.6045439Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.604509549Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.604406643Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.604385693Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-dsmc-0002-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.604436636Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=543660 slug=jobcloudprogrammaticstage version=1 fingerprint=c3907e1c2013dc08 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.604277955Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.603796394s EvaluationString:}]" duration=10.728542ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=1b05ea6b5f86696a28a917b8341f8ebea10684301e05d701beecbe0cd9be6ae3" t=2024-05-29T13:44:14.604347866Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-317548laio1usw1, cloud_platform=AWS, customer_id=C713, env_id=317548, env_name=c713_CPK_prod, env_type=prod, instance=env-317548laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.604299621Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-dsmc-0001-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.604293523Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ynbz8anu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.604177091Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-dsmc-0001-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.604278559Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.604217283Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.604192092Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=168c7495240ad1230d8c7525b7e0718bae4c05980072c823755c6f17024907c6" t=2024-05-29T13:44:14.604172533Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=0e4e9fa19a646dabf36ae9e8c5a7d437bfe14b895c67f82d3814e376d1447fb7" t=2024-05-29T13:44:14.604044084Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.603872544Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.603969222Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:14.603791937Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=0cc832734dbe15ab6986688985ecf9e5289743130f80c0aaabe38798d9fb9ce5" t=2024-05-29T13:44:14.603937961Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.603946572Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.50.226:10250, name=0c4e5153b1309550a3e3084e26f2ba81498eb2800dc26d8d9e628a2e6b5d038e" t=2024-05-29T13:44:14.603827883Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-chat-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.603697688Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymt3asw1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.603687066Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-317398laio1use1, cloud_platform=AWS, customer_id=C621, env_id=317398, env_name=C621 Swiss Medical Prod, env_type=prod, instance=env-317398laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.603672756Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymt3asw1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.603636295Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymt3asw1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.603624925Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=A,B,C,D,E,F,G" t=2024-05-29T13:44:14.603542711Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ympxgx9c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.603455663Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-buildinfo-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.603437209Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.603356394Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-buildinfo-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.603326587Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=f040cbef36823db3aafaf9a5745c65e43a88e898cf4a3bc27dcbcc6802b78767" t=2024-05-29T13:44:14.60333289Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-buildinfo-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.603311056Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ympglsvg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.603254411Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ympglsvg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.603226331Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymp0pvxo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60315679Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.603225019Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymp0pvxo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60308898Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-basic-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.603152625Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymp0pvxo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.603019349Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-317220laio2apn1, cloud_platform=AWS, customer_id=C703, env_id=317220, env_name=C703_sumitomo_PROD, env_type=prod, instance=env-317220laio2apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.602996287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymp0pvxo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602978508Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-basic-001, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.603009097Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=e4c3b0e175a67ca465ce64394e858dbb3eeae6c1773fcbcff4ff69be371045ba" t=2024-05-29T13:44:14.602887925Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.602895724Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-analytics-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.60286392Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymon3t8b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602662515Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=e20985d2b48ad76c11060b978538605d33716bb284f0f971a077c54233197e0c" t=2024-05-29T13:44:14.602779546Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.602745551Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.602696328Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.602681238Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.602664717Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.602680252Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=426229 slug=accelbyte
instance="cache_cluster_id=r-firewalk-justice-qa-achievement-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.60259158Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-qa-achievement-002, environment_name=firewalk-justice-qa" t=2024-05-29T13:44:14.602559513Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymi39z4v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602552244Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymi39z4v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602496963Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymhj1tvt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602337122Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymhj1tvt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602322142Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymhj1tvt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602245011Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-ssn-ntv-0003-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.602314038Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=d150e78392cf68f462cfab398a7879607545304117d330d09a1a088e3e419306" t=2024-05-29T13:44:14.602319859Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=309009 slug=elestyle 
instance="instance=10.100.48.211:10250, name=d132cbacbaceec107282645721a1768987ac5dc8f17574679d02f4626e7b4219" t=2024-05-29T13:44:14.602210124Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymfc4czd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60214628Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymfc4czd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.602087229Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.602053834Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-social-0003-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.601909784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=bbeddf9e94ad07a7329af9f7d492649c1ab6ed3499255e1660fbb2d416cf4a8e" t=2024-05-29T13:44:14.601877219Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ymbd0fui-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.601781896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-317150laio1use2, cloud_platform=AWS, customer_id=C717, env_id=317150, env_name=C717 R+L Carriers Prd, env_type=prod, instance=env-317150laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:14.601623143Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ym7h8yw5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.601673145Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-social-0002-002, 
environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.601646543Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ym7h8yw5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.601606914Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.601527715Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-social-0002-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.601533699Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-social-0001-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.601383494Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ym4nl20b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.601379312Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ym4nl20b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.601350612Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ym4nl20b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.601297301Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=88be87ceb1538049421d57dfcb5b8aa2ebac5596cebecaa4731f574bc4bfc04a" t=2024-05-29T13:44:14.601284295Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ym4nl20b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.60119617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-social-0001-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.601252567Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-314802laio1northeurope, cloud_platform=Azure, customer_id=A202, env_id=314802, env_name=A202_sonae_dev, env_type=undefined, instance=env-314802laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:14.601187158Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=733461 slug=lattice t=2024-05-29T13:44:14.601055222Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=65.976193ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yluq1jbk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.600986018Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.600996626Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.600942715Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-session-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.600934885Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-platform-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.600815237Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ylmggxti-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.600680365Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-platform-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.600667394Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:14.600613299Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=25.958171ms + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-mmv2-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.600529098Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.600528299Z caller=remote_image_capturer.go:33 user=309009 
slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=6555a132a0ef86e6e8e82fad05ed7a106ce0fb9e8c6e59765e8c52b8856f3a71" t=2024-05-29T13:44:14.600508041Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.600469839Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yldcrgp0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.600477173Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-mmv2-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.60039314Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.599898738Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-313006laio2westeurope, cloud_platform=Azure, customer_id=A198, env_id=313006, env_name=A198_GCO_Prod, env_type=prod, instance=env-313006laio2westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.600322535Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-lobby-0002-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.600228171Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yl6ysb00-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.60023972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=584b7ffb40b9b367a092f66e41de1d20111769acd61dc2d81e89ac8f32d41ae2" t=2024-05-29T13:44:14.600219226Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yl6ysb00-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.6002076Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yl6ysb00-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.600082809Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-lobby-0002-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.600106665Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.600018118Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=544da02696d458175d5bd2bc2691e857c58dcaaac437454c82cfed04077e1ce7" t=2024-05-29T13:44:14.600092712Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.599913502Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=4bd62902be25d506a18c5cceed3557db323e1a10c712cd2fe28d2a52784b36c3" t=2024-05-29T13:44:14.599891059Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-lobby-0001-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.599866175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yl0mb6hl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.599730445Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=44483304b3be6dc2483db1053802c9765119122a7f37ea389294da27a3700e94" t=2024-05-29T13:44:14.599733907Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yl0mb6hl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.599653944Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-legal-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.599678252Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yl07iazq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.599586454Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-312233laio1use1, cloud_platform=AWS, customer_id=C687, env_id=312233, env_name=C687 DirecTV LatAm Prod, env_type=prod, instance=env-312233laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.599543724Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-legal-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.599560952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=41d617643afe51a3a074ee1784c44c502c59698a3fc421aa3bcb6a173c29112d" t=2024-05-29T13:44:14.599562647Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-legal-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.599546744Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=41d617643afe51a3a074ee1784c44c502c59698a3fc421aa3bcb6a173c29112d" t=2024-05-29T13:44:14.599551344Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=402063e63c9a24f2079c657889ad7995f2d2abe4f9890bdd090ffd3501321029" t=2024-05-29T13:44:14.599451859Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-leaderboard-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.599436142Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yksv4la5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.599386012Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.599286315Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.599068644Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yksv4la5-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.599313121Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-leaderboard-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.599303253Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=3deba35f0e8e63eb24bfb4f12f4d6df26b65e775a75b2efbfc6d45e168229c2f" t=2024-05-29T13:44:14.599267162Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.599128777Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-311974laio2eastus, cloud_platform=Azure, customer_id=A009, env_id=311974, env_name=A009 OLD AAP Test, env_type=qa, instance=env-311974laio2eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.599179094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-justice-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.59915575Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=376a238b772228cf853aaee4244bfc029090f98a5981a81cea2af0d3c53bfc51" t=2024-05-29T13:44:14.599092812Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykpn3usl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.599048978Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykpn3usl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.599006278Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-justice-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.598996734Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.598864428Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykmhbgjw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.598861426Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.598936182Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=32b1826dc09c60879d786dd11569865d2f29f36423c8eaef1e3d6aa30499b40a" t=2024-05-29T13:44:14.598914325Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-iam-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.598825703Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=3096b8eade8dae99d2092b6b1a3f31b97b1db21db34e896e957c3ee97c003535" t=2024-05-29T13:44:14.598744446Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.598706825Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-iam-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.598716152Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykkzicpo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.598513613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.598631735Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.5986446Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.598587725Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=2bdd5ed5f5af3f84c22e3c562c90910af226b8a9a1b9b4cf488e0e713972fc68" t=2024-05-29T13:44:14.598635816Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=2bdd5ed5f5af3f84c22e3c562c90910af226b8a9a1b9b4cf488e0e713972fc68" t=2024-05-29T13:44:14.598623139Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.scheduler user=430961 slug=solifi version=3 fingerprint=b438bb7bcdc5ea31 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.598519387Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.598257007s EvaluationString:}]" duration=82.112284ms + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykku82rb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.598478752Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.48.211:10250, name=251b078a5ba5a3ab932dfbfa07bcbeb5acdfeaf7511d5c028b84a56842d5cca6" t=2024-05-29T13:44:14.598473498Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykku82rb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.598378041Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.598328396Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=112387 slug=lucidhq instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:14.598298155Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:14.598286568Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykku82rb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.598314791Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-dsmc-0001-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.598315507Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=112387 slug=lucidhq version=2 fingerprint=b0072e2563ef3806 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.598206928Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.596771068s EvaluationString:}]" duration=57.430228ms + level=debug ts=2024-05-29T13:44:14.598214902Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykkf0q1n-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.598105418Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ykavoxe5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.598048908Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.598176723Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-dshub-002, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.598025524Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.597880078Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yk85u5qa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.597795675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yk85u5qa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.597720554Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-311484laio1euw1, cloud_platform=AWS, customer_id=C033, env_id=311484, env_name=AAP-Test environment-EU, env_type=test, instance=env-311484laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=preprod" t=2024-05-29T13:44:14.597720193Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yk85u5qa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.597665464Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.597497313Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, 
name=fa511ee14a3f449bd7ab55bf4b9addab3635d28148eea69014522bac0070b2da" t=2024-05-29T13:44:14.597531716Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.597442943Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-311483laio1use1, cloud_platform=AWS, customer_id=C032, env_id=311483, env_name=AAP-Test environment-US, env_type=test, instance=env-311483laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.597437294Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.597367363Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.597359541Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yk5vlzsi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.597433631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-firewalk-justice-prod-analytics-001, environment_name=firewalk-justice-prod" t=2024-05-29T13:44:14.597361714Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=517596 slug=datar t=2024-05-29T13:44:14.597351413Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=517596 slug=datar instance= t=2024-05-29T13:44:14.597337639Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.597143514Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=517596 slug=datar t=2024-05-29T13:44:14.597279816Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=517596 slug=datar version=129 fingerprint=c71a3a448de2d952 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.59714949Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc0233e3d30} Spend in day Wallester_USD:{Var:Spend in day Wallester_USD Labels: Value:0xc0233e3d38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.596742238s EvaluationString:[ var='C' labels={} value=0 ], [ var='Spend in day Wallester_USD' labels={} value=8171.27 ]}]" duration=839.942681ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yk1zbs6t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.597175019Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=e4e59778b984b7bcb7d7f7a8178f207d52bd1b66ab9344220868509b92e5b522" t=2024-05-29T13:44:14.597212913Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yk1zbs6t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.597123328Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.597117525Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-310499laio1usw2, cloud_platform=AWS, customer_id=C702, env_id=310499, env_name=C702 Cox Xtime UAT, env_type=qa, instance=env-310499laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:14.597111604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=755728 slug=velocitypresto t=2024-05-29T13:44:14.597163654Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=755728 slug=velocitypresto instance= t=2024-05-29T13:44:14.597138644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=e1bb2ef2d9b05335e53003cc27e40e8d0bcb7cedf444ec586f2559a906b5a2de" t=2024-05-29T13:44:14.597091309Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.597060325Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjztvlnn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.596973037Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-ssn-service-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.596972124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=d673b8286597b504e89fe8c94a437f98cd6adab3e0fe362cb6d60a40d39febd0" t=2024-05-29T13:44:14.596819913Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-ssn-native-0003-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.596776172Z level=debug msg="Setting next state" handler=resultNormal + 
level=debug ts=2024-05-29T13:44:14.596607635Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.596641823Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=c9dbda5db87ae43d24cd5f6a4c22230fe237c884c12d4651dba58d6fe1fbe818" t=2024-05-29T13:44:14.596659087Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjv4w7se-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.596612513Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:14.596589283Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.596405102Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=491157 slug=prd01wr t=2024-05-29T13:44:14.596534279Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjrbvg85-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.596390081Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=b8af2af8fa8ab4e47f081de0337eb179abf05367ce5d1c8d8ceb6aaf276aadb0" t=2024-05-29T13:44:14.59640684Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=b8af2af8fa8ab4e47f081de0337eb179abf05367ce5d1c8d8ceb6aaf276aadb0" t=2024-05-29T13:44:14.596398491Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjkcqggp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.59630141Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjkcqggp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.596261679Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=b180f07b25be350ed099840bd3bf84ad82652039e126dc75f283cc2ba2615b3e" t=2024-05-29T13:44:14.596298517Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-309888laio1use1, cloud_platform=AWS, customer_id=C711, env_id=309888, env_name=C711 Disney CP Dev, env_type=dev, instance=env-309888laio1use1, job=integrations/node_exporter, region=us-east-1, stage=decommission" t=2024-05-29T13:44:14.596316936Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=97cbf2cafb1dea0074dc93f0b629c93957b541df22097d7557562944fa4b5d47" t=2024-05-29T13:44:14.596168048Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-308749laio1use1, cloud_platform=AWS, customer_id=C694, env_id=308749, env_name=C694_COX_DEV_OLD, env_type=dev, instance=env-308749laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.596099552Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=400599 slug=unionai t=2024-05-29T13:44:14.596091633Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=400599 slug=unionai instance="id=249906, name=unionai-logs" t=2024-05-29T13:44:14.596076264Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjkcqggp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.596149638Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.596174493Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager.persist user=633335 slug=promqlworkshop t=2024-05-29T13:44:14.59612061Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.962973ms
+ level=debug ts=2024-05-29T13:44:14.596137851Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.595971506Z caller=remote_instance_store.go:51 user=714711 slug=nomiai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.596042914Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=400599 slug=unionai t=2024-05-29T13:44:14.596025901Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=400599 slug=unionai version=231 fingerprint=f9411ab1360b6fa6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.595952327Z level=debug msg="Alert rule evaluated" results="[{Instance:id=249906, name=unionai-logs State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=249906, name=unionai-logs Value:0xc02f59f268} B:{Var:B Labels:id=249906, name=unionai-logs Value:0xc02f59f1f0} C:{Var:C Labels:id=249906, name=unionai-logs Value:0xc02f59f220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.595662162s EvaluationString:[ var='A' labels={id=249906, name=unionai-logs} value=3.2125 ], [ var='B' labels={id=249906, name=unionai-logs} value=3.2125 ], [ var='C' labels={id=249906, name=unionai-logs} value=0 ]}]" duration=12.916628ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjg8mtav-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.595890475Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.595858062Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-308236laio1eastus, cloud_platform=Azure, customer_id=A105, env_id=308236, env_name=A105 Henry Schein PROD, env_type=prod, instance=env-308236laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.595851567Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-platform-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.595925028Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yjcw5068-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.595802025Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.595843302Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=91b310a68e1b00647f59498be471917a2187c5a73d8fa357231d7a35c698add1" t=2024-05-29T13:44:14.595840561Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.595769666Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=714711 slug=nomiai t=2024-05-29T13:44:14.595788784Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.595663507Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.595614509Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.595727721Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:14.595628113Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.595643753Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.595562525Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.595604175Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.595556802Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad t=2024-05-29T13:44:14.595538686Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yj6veb77-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.595568332Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yj6veb77-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.595545112Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=163513 slug=dialpad version=405 fingerprint=bfaeb71e2cabc046 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.595447255Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.595056526s EvaluationString:}]" duration=61.729276ms
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-matchv2-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.595522081Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.595513575Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.595443042Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:14.595341534Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance= t=2024-05-29T13:44:14.595315204Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.595385073Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=8552a3ddbfbc8986fa025783e906c234fa860c76e5fca65a8f7d0c94e0009a62" t=2024-05-29T13:44:14.595377908Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-307881laioeastus, cloud_platform=Azure, customer_id=A110, env_id=307881, env_name=A110 DEV Cardinal Glass, env_type=dev, instance=env-307881laioeastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.595340562Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yj1ib6l5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.595307309Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.595240018Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.595190719Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yj1ib6l5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.595196088Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=163215 slug=tripadvisor version=121 fingerprint=0dbc2609d7a9ef4b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.594785231Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.594501994s EvaluationString:}]" duration=280.081352ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-307641laio1usw2, cloud_platform=AWS, customer_id=C702, env_id=307641, env_name=C702 Cox Xtime DEV, env_type=dev, instance=env-307641laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:14.595132712Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yj0c12cs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.595100197Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=81f8d8b139ad6a11b4973a673d80cae8cb487d5b1f40338438746bce4317ce7f" t=2024-05-29T13:44:14.595101198Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=81f8d8b139ad6a11b4973a673d80cae8cb487d5b1f40338438746bce4317ce7f" t=2024-05-29T13:44:14.595093356Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.595021963Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=7bb8a98008394a7b6766a7ac74679b9248317f3cd57f192f28d39477cc2a663c" t=2024-05-29T13:44:14.595008368Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.595015675Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=7bb8a98008394a7b6766a7ac74679b9248317f3cd57f192f28d39477cc2a663c" t=2024-05-29T13:44:14.59499638Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yj0c12cs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.594994076Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yittd663-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.594871955Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.594916362Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-lobby-0001-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.594863538Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=64963be8bc5b3101703345867c17bf16ef0b6c6e9be0c09068d3bf6e986f76ee" t=2024-05-29T13:44:14.594824165Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=64963be8bc5b3101703345867c17bf16ef0b6c6e9be0c09068d3bf6e986f76ee" t=2024-05-29T13:44:14.594812506Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yittd663-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.594690793Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yittd663-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.594610952Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=5859e8a17addfcfd9ed247a2f12dcaf942b9e3ebe343e258892a449822d13f9e" t=2024-05-29T13:44:14.594595802Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=5859e8a17addfcfd9ed247a2f12dcaf942b9e3ebe343e258892a449822d13f9e" t=2024-05-29T13:44:14.594583629Z level=debug msg="Setting next state" handler=resultAlerting
+ Error parsing panelUID for alert annotation ruleID 2714 dash actual error strconv.ParseInt: parsing "": invalid syntax
+ logger=ngalert.state.manager.persist user=824501 slug=bendingspoons t=2024-05-29T13:44:14.594472362Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.164797ms
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-qosm-0001-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.594567201Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-qosm-0001-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.594554184Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.594386353Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=4c5952153f5719ecdb67bdaf612c62d4e939ca62c7543e8adaa36135a62665b9" t=2024-05-29T13:44:14.594458705Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yinmcezk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.59438555Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yinmcezk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.59435568Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yinmcezk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.594312209Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.594288323Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-307041laio1eastus2, cloud_platform=Azure, customer_id=A197, env_id=307041, env_name=A197 7-11 Dev, env_type=dev, instance=env-307041laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.594192045Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yi8f9oel-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.594086087Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.594133024Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.594132271Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.594057367Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.593950302Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-306132laio1uaenorth, cloud_platform=Azure, customer_id=A173, env_id=306132, env_name=A173_Adia_UAT, env_type=test, instance=env-306132laio1uaenorth, job=integrations/node_exporter, region=UAENorth, stage=live" t=2024-05-29T13:44:14.593995391Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.594035092Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.594023074Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.594023318Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.593924768Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=2f2fe361b248c07f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.59389282Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.593596529s EvaluationString:}]" duration=305.265942ms
+ logger=ngalert.scheduler user=426229 slug=accelbyte version=210 fingerprint=22aa88b8cb56295f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.593819134Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.593563446s EvaluationString:}]" duration=108.02954ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yi5b2rqz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593831434Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=40237df7d3ae04532639e7a509efb5d465898e5826484880fb2ecfe170eef02c" t=2024-05-29T13:44:14.593890585Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yi5b2rqz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593771544Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=40237df7d3ae04532639e7a509efb5d465898e5826484880fb2ecfe170eef02c" t=2024-05-29T13:44:14.59384182Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.593771607Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yi5b2rqz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593702143Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-iam-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.593692071Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhtj6lui-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593664242Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.593616856Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=33e9c3e3bc1f4a3943991819dbd532879dc3c467abfc6ec1df220ef0171af650" t=2024-05-29T13:44:14.593606521Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhtj6lui-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593552501Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-gdpr-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.593558127Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=32282388707bef1250bb5a4dbf9941d4e2ffb4b1ad2176fffa9d254613913100" t=2024-05-29T13:44:14.593518588Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-gdpr-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.593544369Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhrlifas-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593494681Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=32282388707bef1250bb5a4dbf9941d4e2ffb4b1ad2176fffa9d254613913100" t=2024-05-29T13:44:14.593507742Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-305784laio1eastus, cloud_platform=Azure, customer_id=A140, env_id=305784, env_name=A140 OBE PROD, env_type=prod, instance=env-305784laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.59349229Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-305784laio1eastus, cloud_platform=Azure, customer_id=A140, env_id=305784, env_name=A140 OBE PROD, env_type=prod, instance=env-305784laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.593447942Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=464973 slug=equansdatahub t=2024-05-29T13:44:14.593386079Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-dsu-0001-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.593384996Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhrlifas-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593301109Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhqoslwo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593243528Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhqoslwo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593227588Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhqoslwo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.593187828Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.59327413Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=15c806703f97f08a34e997f68d4e9aa223736e75b18a94aa318e924713e1cf8e" t=2024-05-29T13:44:14.593239234Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=119a9cefdf278af3a9e37b6008e530224b04563221ddfe5a9f848225176bad15" t=2024-05-29T13:44:14.593155411Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=119a9cefdf278af3a9e37b6008e530224b04563221ddfe5a9f848225176bad15" t=2024-05-29T13:44:14.593147055Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager.persist user=922741 slug=johnnyleeothon t=2024-05-29T13:44:14.593083369Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.804006ms
+ logger=ngalert.state.manager.persist user=715708 slug=ggiprod t=2024-05-29T13:44:14.592963128Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.630764ms
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.71:10250, name=07d46d287fe3fd5aa69a919c9383d4251aca3035aa8a02c5f3d17b6ecd089e21" t=2024-05-29T13:44:14.593001806Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhpz7jqg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.592970275Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3" t=2024-05-29T13:44:14.592886843Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.59289526Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhpz1coe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.592813724Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.592779235Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.592635443Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhotvq4n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.592611372Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-achv-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.592626924Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=faa2e1b732a16d84399468b6bb125784a60fd8b89528cc6ffa138932819fb6ac" t=2024-05-29T13:44:14.592592229Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhotvq4n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.592506741Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhotvq4n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.59247916Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-305592laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=305592, env_name=A172_SCA_Prod, env_type=prod, instance=env-305592laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.592511832Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.592352917Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-justice-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.592278354Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhdyt1vw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.592179377Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.592190164Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=e624f500f3b430cd6441c7562fbde3809b7946c19ddba6c4d3f87b127bdac85b" t=2024-05-29T13:44:14.592180346Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yhdyt1vw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.592135967Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-dreamhaven-justice-prod-dsmc-0001-001, environment_name=dreamhaven-justice-prod" t=2024-05-29T13:44:14.592136085Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=e39694e35e6b69167cebbcaab04ee48bde1a59eef4658f83e23b8689ce0e9b21" t=2024-05-29T13:44:14.592066329Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=327842 slug=exabeam instance= t=2024-05-29T13:44:14.591986681Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=e2a4ce6236ab5565909302269a4d2a618464f2051ad233462235ef0fbc68d463" t=2024-05-29T13:44:14.591907625Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-customerportal-justice-prod-justice-002, environment_name=customerportal-justice-prod" t=2024-05-29T13:44:14.591861824Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygz7z5pf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.591786633Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygyqy7vr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.591662322Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-customerportal-justice-prod-justice-001, environment_name=customerportal-justice-prod" t=2024-05-29T13:44:14.591698983Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygyqy7vr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.591604581Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=da9dfed20034ae506cc2fb67b840964d762d8655c44a3cd24ccf8a747ea2992f" t=2024-05-29T13:44:14.591664686Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-304379laio2eastus, cloud_platform=Azure, customer_id=A190, env_id=304379, env_name=A190 PwC PROD, env_type=prod, instance=env-304379laio2eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.5916059Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-304379laio2eastus, cloud_platform=Azure, customer_id=A190, env_id=304379, env_name=A190 PwC PROD, env_type=prod, instance=env-304379laio2eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.591563045Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygt19pq5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.59149529Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=d60bfafa139495466191fbe031e6615eaf832761f8595224539e6a9f4eb5eccc" t=2024-05-29T13:44:14.591553608Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-barb-justice-stage-justice-002, environment_name=barb-justice-stage" t=2024-05-29T13:44:14.591550676Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygt19pq5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.591414389Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygnl1ees-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.591367619Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.591439024Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=d2cb06efa7ba018a9b63f700eb410f45df3bbc570f54b84c4410127b76879c46" t=2024-05-29T13:44:14.591455719Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-barb-justice-stage-justice-001, environment_name=barb-justice-stage" t=2024-05-29T13:44:14.591422383Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.591200864Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-303879laio1apn1, cloud_platform=AWS, customer_id=C689, env_id=303879, env_name=C689_Joshin-Denki_Prod, env_type=prod, instance=env-303879laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.591231361Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.591239492Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager.persist user=691103 slug=caetest t=2024-05-29T13:44:14.590945954Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=691103 slug=caetest instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.590931773Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.591164088Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=691103 slug=caetest instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.590904153Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.590987411Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygkoi32g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.591068396Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-barb-justice-prod-justice-002, environment_name=barb-justice-prod" t=2024-05-29T13:44:14.591057867Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.591021811Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygkoi32g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590992845Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-303873laio1apn1, cloud_platform=AWS, customer_id=C689, env_id=303873, env_name=C689_Joshin-Denki_DEV, env_type=dev, instance=env-303873laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.591024362Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=691103 slug=caetest t=2024-05-29T13:44:14.590856752Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-303873laio1apn1, cloud_platform=AWS, customer_id=C689, env_id=303873, env_name=C689_Joshin-Denki_DEV, env_type=dev, instance=env-303873laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:14.591008378Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.59077041Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ygd52xu1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590939605Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-barb-justice-prod-justice-001, environment_name=barb-justice-prod" t=2024-05-29T13:44:14.590883597Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.590777686Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-aexlab-justice-prod-justice-002, environment_name=aexlab-justice-prod" t=2024-05-29T13:44:14.590742238Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.590676384Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.590673853Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f" t=2024-05-29T13:44:14.590655097Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.590558821Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg6v1xz4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.59051864Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=457025 slug=truta instance="resource.label.database_id=core-db-prd-255:core-db-prd-108, resource.label.project_id=core-db-prd-255, resource.label.region=us-east1, resource.type=cloudsql_database" t=2024-05-29T13:44:14.590463551Z level=debug msg="Setting next state" handler=resultNormal
+ ts=2024-05-29T13:44:14.590387384Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: grafana-ruler-6ddb6c5b98-9897w-2a4648ee 10.144.12.35:7946"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-accelbyte-justice-demo-justice-001, environment_name=accelbyte-justice-demo" t=2024-05-29T13:44:14.590433542Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg6v1xz4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590374969Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=457025 slug=truta version=7 fingerprint=a9b298bcb84cd0ab attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.590286148Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.database_id=core-db-prd-255:core-db-prd-108, resource.label.project_id=core-db-prd-255, resource.label.region=us-east1, resource.type=cloudsql_database State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resource.label.database_id=core-db-prd-255:core-db-prd-108, resource.label.project_id=core-db-prd-255, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc016453400} C:{Var:C Labels:resource.label.database_id=core-db-prd-255:core-db-prd-108, resource.label.project_id=core-db-prd-255, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc016453430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.589875182s EvaluationString:[ var='B' labels={resource.label.database_id=core-db-prd-255:core-db-prd-108, resource.label.project_id=core-db-prd-255, resource.label.region=us-east1, resource.type=cloudsql_database} value=0.059700878450166785 ], [ var='C' labels={resource.label.database_id=core-db-prd-255:core-db-prd-108, resource.label.project_id=core-db-prd-255, resource.label.region=us-east1, resource.type=cloudsql_database} value=0 ]}]" duration=131.587333ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg4j8nwk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590345288Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg4j8nwk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590293888Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg4573n4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590183977Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg4573n4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590173507Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=843687acc24db76dee456afd8a2cc1426d5355fa6290d7948b2bdc6aa4a11653" t=2024-05-29T13:44:14.590208098Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg4573n4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590069856Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-accelbyte-blackbox-prod-shared-enc-001, environment_name=accelbyte-blackbox-prod" t=2024-05-29T13:44:14.590117749Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.590092771Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg13hiqu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.590005355Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=7e99157d8039c55c0ed1d45692b8a3dcf94209c284d963c71cbb42de1f494d94" t=2024-05-29T13:44:14.589961054Z level=debug msg="Setting next state" handler=resultAlerting
+ level=debug ts=2024-05-29T13:44:14.589911252Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-accelbyte-blackbox-dev-shared-encrypt-001, environment_name=accelbyte-blackbox-dev" t=2024-05-29T13:44:14.589939461Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yg13hiqu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.589884604Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=399183 slug=guidion t=2024-05-29T13:44:14.589838077Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.52817ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yftvlkeg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.589799753Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-abcloud-volition-prod-001-01, environment_name=volition-justice-prod" t=2024-05-29T13:44:14.589796718Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=7955a19c73ac07f0f686427fe0c5d04521b7d60ec24388bb429323027746c9cf" t=2024-05-29T13:44:14.589736561Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.589678859Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.589746888Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yfqd68vl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58955762Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yfqd68vl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.589433989Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-abcloud-volition-prod-001, environment_name=volition-justice-prod" t=2024-05-29T13:44:14.589617365Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=745d15001863e6f94f2a72558aa94b511e0ee347d39fb400cc30918487b380f5" t=2024-05-29T13:44:14.589617131Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-299017laio2eastus2, cloud_platform=Azure, customer_id=A119, env_id=299017, env_name=A119_PROD, env_type=dev, instance=env-299017laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.589435683Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.589464996Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.589440044Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.589399267Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yfmuw9xg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.589365708Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yfmuw9xg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.589247977Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-abcloud-starbreeze-payday3-prod-435-001, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.589313101Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yfmuw9xg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.589212987Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-299017laio1eastus2, cloud_platform=Azure, customer_id=A119, env_id=299017, env_name=A119_PROD, env_type=dev, instance=env-299017laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.58924317Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-abcloud-starbreeze-payday3-prod-435, environment_name=starbreeze-justice-pd3-prod" t=2024-05-29T13:44:14.589144802Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250,
name=591484140bd0b5db3ac54c1dbc78292de292825a56849063027ac062e9e2a326" t=2024-05-29T13:44:14.589133955Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-298570laio1eastus2, cloud_platform=Azure, customer_id=A119, env_id=298570, env_name=A119_UAT, env_type=dev, instance=env-298570laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.588912311Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-298570laio1eastus2, cloud_platform=Azure, customer_id=A119, env_id=298570, env_name=A119_UAT, env_type=dev, instance=env-298570laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.588894173Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yfea43h1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.588890804Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.588777975Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yf6ic3nz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.588689601Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=41f9a237fc7429d9ccefe217d1afdbdd7e7dc553dea6caf8822ac9497d113e66" t=2024-05-29T13:44:14.588758145Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-298431laio1eastus2, cloud_platform=Azure, customer_id=A119, env_id=298431, env_name=A119_DEV, env_type=dev, instance=env-298431laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.588725055Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.588606365Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2" t=2024-05-29T13:44:14.588632167Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-298122laiouse1, cloud_platform=AWS, customer_id=C506, env_id=298122, env_name=C506_Prod_MCE_LAST, 
env_type=prod, instance=env-298122laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.588562032Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=55557 slug=jigs031 t=2024-05-29T13:44:14.588394806Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.190739ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yevd0ti6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.588327728Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yeluy8aw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.588281317Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.58819893Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yeluy8aw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.588244037Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yeluy8aw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.588147496Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yejq6ei8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.588082395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yejq6ei8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587960694Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yefv7kpe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587898583Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yefv7kpe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587887203Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yefv7kpe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587851203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yedyrdsq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587709661Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=2fac00b9c4fa698ca8ef303950173fab3d89fd5dc780b54a3de1cfacea8f71a9" t=2024-05-29T13:44:14.587965599Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-abcloud-1047games-prod-matchmaking-001, environment_name=splitgate1047-justice-prod" t=2024-05-29T13:44:14.587977152Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yedyrdsq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58758375Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye9e01jz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587514379Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye9e01jz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587438859Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye9e01jz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587394288Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f" t=2024-05-29T13:44:14.587849295Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye5rrk44-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587115285Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye5rrk44-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.587070965Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye4nkjys-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586925273Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.58777367Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.888398ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye4nkjys-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586846163Z level=debug msg="Setting next state" handler=resultNormal + 
level=debug ts=2024-05-29T13:44:14.587732286Z caller=remote_instance_store.go:51 user=635771 slug=sharedservices msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye4nkjys-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586809612Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-295926laio2germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.587649458Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye48qhqf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58659076Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye48qhqf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58657438Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye0lpfwh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586506809Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye0lpfwh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586463259Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye0lpfwh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.586368318Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye0bs5y3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586329717Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ye0bs5y3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586286287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydzxbhj9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586157456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-abcloud-1047games-prod-lobby-server-0002-001, environment_name=splitgate1047-justice-prod" t=2024-05-29T13:44:14.587555949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydzxbhj9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586128005Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydzxbhj9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586094575Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydzxbhj9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586084755Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydzxbhj9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.586028944Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydyq8vx4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.585956953Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.46.153:10250, name=11a6e24b4e74c7a9ce9e13c12ffba8c63b6104bc739653733f845d6bbc3dbc34" t=2024-05-29T13:44:14.587452306Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=426229 slug=accelbyte instance="cache_cluster_id=r-abcloud-1047games-prod-lobby-server-0001-001, environment_name=splitgate1047-justice-prod" t=2024-05-29T13:44:14.587152786Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.587213502Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.587189539Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.586919923Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735" t=2024-05-29T13:44:14.587045136Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.587013984Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.5869984Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, 
name=ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07" t=2024-05-29T13:44:14.586951618Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=ea900f2cec1f9c0c6eb45e75aaaa12ae2bc5b8df2c343e48e5f5daad5dbf3d78" t=2024-05-29T13:44:14.586851259Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.586799643Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=e719fc8861bc3a3e1dc72928e299852e9177e406e3c964ca944683e38247f214" t=2024-05-29T13:44:14.586734586Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=dd1036366297ef0152543b05b2ae9f00dd1de9daff4f8e8bbdf60f3d2e4a3a99" t=2024-05-29T13:44:14.58662885Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:14.586476869Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=d9d3bae47d0d6416871efecfd66aa49dc77f7ffa77d3bf55f0b4fbe9dc427537" t=2024-05-29T13:44:14.58649081Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e" t=2024-05-29T13:44:14.586378876Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=938012 slug=mywifinetworks t=2024-05-29T13:44:14.586276209Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.872854ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.586198488Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=cba33cd9c9d8c68a55660523a05db9ab879d7c062132032db51fa4d1fcb70c5b" t=2024-05-29T13:44:14.586083299Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=457025 slug=truta instance="resource.label.database_id=user-management-db-prd-001:user-management-db-prd-85, resource.label.project_id=user-management-db-prd-001, resource.label.region=us-east1, resource.type=cloudsql_database" t=2024-05-29T13:44:14.586097741Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:14.585980028Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=344017 slug=descript instance="resource.label.project_id=production-273614, resource.type=k8s_container" t=2024-05-29T13:44:14.585965365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Polkadot Zetetic 1 (OVH-SBG), chain=Polkadot, exported_chain=polkadot, host=OVH, instance=37.59.29.95:39615, job=Cloud, location=Strasbourg, FR, pool=Zetetic 1, status=best" t=2024-05-29T13:44:14.585779154Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.585876744Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.585839188Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Polkadot Watermelon (RPS-MDH), chain=Polkadot, exported_chain=polkadot, host=RapidSwitch, instance=188.227.164.110:39615, job=Cloud, location=Maidenhead, GB, pool=Watermelon, status=best" t=2024-05-29T13:44:14.585703354Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85" t=2024-05-29T13:44:14.585857003Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:14.585760316Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.585725289Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydxhmpbi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58565464Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydxhmpbi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58559073Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:14.585749334Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.58572387Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.585703602Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydu7wvso-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.585477439Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=49546 slug=nulogyinfra version=3 fingerprint=d1c47c2b5b4fc510 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.585604407Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.58532492s EvaluationString:}]" duration=124.166977ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=b84d571e56b2e5abac8fe8519549a997076578e5443b7cf10567ecc3805b0a90" t=2024-05-29T13:44:14.58561613Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydrooz07-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.585289487Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydqceqrw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.585235346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydqceqrw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.585198726Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.585276496Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.585443514Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=530405 slug=zetetic instance="alias=Polkadot Archive (ZTC-HFH), chain=Polkadot, exported_chain=polkadot, host=Zetetic Technologies, instance=192.168.1.36:39615, job=Higham Home, location=Higham Ferrers, GB, pool=Archive, status=best" t=2024-05-29T13:44:14.585348986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=530405 slug=zetetic t=2024-05-29T13:44:14.585284122Z level=debug msg="State manager processing evaluation results" resultCount=11 + level=debug ts=2024-05-29T13:44:14.585187953Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling 
SaveAlertInstance" + logger=ngalert.state.historian backend=loki user=371756 slug=asapp t=2024-05-29T13:44:14.585214285Z level=debug msg="Alert state changed creating annotation" newState="Normal (MissingSeries)" oldState=Pending + logger=ngalert.state.historian backend=loki user=371756 slug=asapp t=2024-05-29T13:44:14.585184023Z level=debug msg="Alert state changed creating annotation" newState=Pending oldState=Normal + level=debug ts=2024-05-29T13:44:14.585123608Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.58506924Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.184085ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=9367ebbf6a3a56c37f1b1e019b791f668784cd8b9e40bd670635e02fe9d26541" t=2024-05-29T13:44:14.585071176Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.585023949Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydl0i0ux-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584978053Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.584968824Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydl0i0ux-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584921293Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.584831617Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydhkp0c4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584756221Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.584750207Z level=debug msg="Saving alert states" count=21 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.584674523Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydg1yv99-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58461131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydg1yv99-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58460003Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.584635068Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.584535864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.transientEvents" t=2024-05-29T13:44:14.584452022Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.sharedPE" t=2024-05-29T13:44:14.584420452Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ydebq6oj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584393017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.device_metadata" t=2024-05-29T13:44:14.584311921Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.device_metadata" t=2024-05-29T13:44:14.5843059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yd2aje6v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584316017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.device_errors" t=2024-05-29T13:44:14.584274041Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yd2aje6v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584219396Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.camera_upgrade_reply" t=2024-05-29T13:44:14.58422612Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yczo2y19-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584181875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yczo2y19-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584172655Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yczo2y19-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584144725Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=4394087e58cc295a53619b824fbdec5883e43d2cb938a67a6167963f54cf08b3" t=2024-05-29T13:44:14.584210454Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yczo2y19-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584072384Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.camera_recording_update" t=2024-05-29T13:44:14.584147658Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.camera_login" t=2024-05-29T13:44:14.584091228Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.camera_forced_upgrade_req" t=2024-05-29T13:44:14.584063228Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yczmuf70-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.584018094Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yczmuf70-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.583958553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yczmuf70-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.583943743Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.583965194Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=3726b10ab5d6c23a0a8ecfdb940ab72bdece96ee8f073c6ddcd756dd262ba626" t=2024-05-29T13:44:14.584069309Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.camera_adc_wipe" t=2024-05-29T13:44:14.583956366Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.583980191Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.camera_adc_wipe" t=2024-05-29T13:44:14.583944117Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:14.583890641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="queue=two.leia.backend_report" t=2024-05-29T13:44:14.583904626Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:14.583833672Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=35dc8af3e073b65ea56f0ab7a4ae88f3321ed929c1b2c5927cc651dbfe6ae92c" t=2024-05-29T13:44:14.583800523Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.scheduler 
user=109452 slug=deltarisk version=12 fingerprint=658345278198a92a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.583760239Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.583460454s EvaluationString:}]" duration=47.59232ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ycvohqdj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.583776671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=715708 slug=ggiprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.583314633Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ycvohqdj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58369844Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ycuc3yxx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.583598559Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.583526608Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=715708 slug=ggiprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.583296414Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.583550034Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.42.89:10250, name=2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8" t=2024-05-29T13:44:14.583530725Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:14.583433416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ycnyts27-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.583336577Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ycnyts27-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.583294166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=922741 slug=johnnyleeothon instance= t=2024-05-29T13:44:14.583245492Z level=warn msg="Failed to take an image" dashboard=5BQr-Osnz panel=4 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:14.583189801Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.583196879Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed" t=2024-05-29T13:44:14.583188878Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.583108872Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.583048105Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c" t=2024-05-29T13:44:14.583056354Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.583000658Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ycn8lm04-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.583010593Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.583032741Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.582970423Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ycn8lm04-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582932512Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.582953804Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=d1566308bdbe6365bf235cc496bb2fe5a84ade50f98444222c1cfebac3fac032" t=2024-05-29T13:44:14.58293161Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.582792088Z caller=remote_instance_store.go:51 user=733461 slug=lattice msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ycd95u25-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582890412Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.582810774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499" t=2024-05-29T13:44:14.582801179Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=c2f8c55042bebff96a026d81d1c7e3150c0ffc71d19f0ff60c53bc50470d4704" t=2024-05-29T13:44:14.58269531Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=c2f8c55042bebff96a026d81d1c7e3150c0ffc71d19f0ff60c53bc50470d4704" t=2024-05-29T13:44:14.582683697Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.582609654Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-yc7u9cwp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582553199Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=bf01eb05706777fe1a2042fda79f1a7fb1c3d7312fe753d12a37d66ba710f56e" t=2024-05-29T13:44:14.582595759Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yc7u9cwp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582539238Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.582461136Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.582474227Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=b7e271013440aab1050db311be3defc1c1ddc566f25f84348b1a7a2d908f7e84" t=2024-05-29T13:44:14.582455061Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybslm0in-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582309256Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927" t=2024-05-29T13:44:14.582339872Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927" t=2024-05-29T13:44:14.582325696Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybqmg8x8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582209545Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-ybqmg8x8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582195455Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybqmg8x8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582156884Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.582194775Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=922741 slug=johnnyleeothon instance= t=2024-05-29T13:44:14.582179646Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yblc34ox-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582064384Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yblc34ox-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582051023Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.582055608Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yblc34ox-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.582003733Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybjb03ra-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.581960452Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, 
customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.581947931Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybjb03ra-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.581895042Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=386776 slug=rcsworks t=2024-05-29T13:44:14.581847224Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.472561ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96" t=2024-05-29T13:44:14.581841421Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybj3y80j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58172371Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybj3y80j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58168527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.581738022Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.581722403Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1" t=2024-05-29T13:44:14.581610779Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=756004 slug=jdsportsprd t=2024-05-29T13:44:14.581474412Z level=debug msg="Saving alert 
states done" count=1 max_state_save_concurrency=1 duration=9.158337ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybfcowup-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.581593849Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ybfcowup-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.581561548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.581495654Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=707420 slug=pangealab instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.58139151Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=707420 slug=pangealab instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.581380081Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.581496172Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=707420 slug=pangealab instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.58132532Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=707420 slug=pangealab t=2024-05-29T13:44:14.581292619Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=707420 slug=pangealab version=1 fingerprint=94eb8a156320ca16 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.581199167Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.580861803s EvaluationString:}]" duration=6.894578ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f" t=2024-05-29T13:44:14.581352529Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-yb53wk1m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.581274915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb53wk1m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.581244645Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-288988laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=288988, env_name=A172_SCA_DEV, env_type=dev, instance=env-288988laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.581248227Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-288988laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=288988, env_name=A172_SCA_DEV, env_type=dev, instance=env-288988laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.581219605Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.581168944Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.581203546Z caller=remote_instance_store.go:51 user=334665 slug=mjacobson msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb53wk1m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.581125514Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.580982972Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2" t=2024-05-29T13:44:14.58110224Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.581067715Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.581053206Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.580990944Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=85008 slug=kalypsolp t=2024-05-29T13:44:14.580956947Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.500296ms + logger=ngalert.state.manager user=334665 
slug=mjacobson instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.581029846Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294" t=2024-05-29T13:44:14.580966242Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb4e4lbf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580955992Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb4e4lbf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580920442Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb4e4lbf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580874711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb4e4lbf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580841441Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm6, exported_orgunit=TELE, fan=6, host=vm6-ilo.telecomx.dk, host_short=vm6-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=vm, type=health" t=2024-05-29T13:44:14.580839493Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb3ug4hi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.58078784Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm6, exported_orgunit=TELE, fan=6, host=vm6-ilo.telecomx.dk, host_short=vm6-ilo, 
hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=vm, type=health" t=2024-05-29T13:44:14.580824893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-prom, ref_id=count,success rate" t=2024-05-29T13:44:14.580869681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-prom, ref_id=count,success rate" t=2024-05-29T13:44:14.580858897Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=78e05a0a639c8f7d9946fb20610daf998c26fdb778934e0a8c56376711b9d50b" t=2024-05-29T13:44:14.580810602Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=214309 slug=spenmo t=2024-05-29T13:44:14.580831438Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=78e05a0a639c8f7d9946fb20610daf998c26fdb778934e0a8c56376711b9d50b" t=2024-05-29T13:44:14.580797797Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm6, exported_orgunit=TELE, fan=5, host=vm6-ilo.telecomx.dk, host_short=vm6-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=vm, type=health" t=2024-05-29T13:44:14.580735792Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=77d58b7d52405f4d300214b45e192d3772701d4f849c77637310b5a80ac3345b" t=2024-05-29T13:44:14.580687245Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb3tjrme-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580622269Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.580574862Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb3tjrme-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580572748Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, 
name=6b1e0fef2bfb006bce1da6f3536a525375dff20bf9dc941b2fbaed9d3e733921" t=2024-05-29T13:44:14.580439853Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.580410767Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.580377008Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-288344laio1westus2, cloud_platform=Azure, customer_id=A116, env_id=288344, env_name=A116_Costco_Prod_2021, env_type=prod, instance=env-288344laio1westus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:14.580374622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.580319177Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb1b8v3a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580291695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb1b8v3a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580275315Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb15a7dy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580231785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm5, exported_orgunit=TELE, fan=6, host=vm5-ilo.telecomx.dk, host_short=vm5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=vm, type=health" t=2024-05-29T13:44:14.580276985Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=5d0b5617d1cdc9d3e0c45f0bf01ba504e06b9e04d9272876c702dae749f08873" t=2024-05-29T13:44:14.580193953Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-yb15a7dy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580120434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm5, exported_orgunit=TELE, fan=4, host=vm5-ilo.telecomx.dk, host_short=vm5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=vm, type=health" t=2024-05-29T13:44:14.580127382Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f" t=2024-05-29T13:44:14.580066311Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yb15a7dy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.580047283Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f" t=2024-05-29T13:44:14.58005676Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yayo99l0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579885581Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yayo99l0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579874081Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=41a102ee21c3d1d3a94d02f5fcdba4d988fdfcb5e8b062148168f99f871bd861" t=2024-05-29T13:44:14.579939784Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.579947412Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=3a8dc664364854a52bb923d680106264c817212f2d7306f1e2dd772f946b7329" 
t=2024-05-29T13:44:14.579826289Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm5, exported_orgunit=TELE, fan=0, host=vm5-ilo.telecomx.dk, host_short=vm5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=vm, type=health" t=2024-05-29T13:44:14.579816477Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm5, exported_orgunit=TELE, fan=0, host=vm5-ilo.telecomx.dk, host_short=vm5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=vm, type=health" t=2024-05-29T13:44:14.579804577Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yavfsa86-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57972825Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.57977355Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-nqwmz" t=2024-05-29T13:44:14.579719374Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-4lr59" t=2024-05-29T13:44:14.579681989Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-286990laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=286990, env_name=A169_Aigues_Prod, env_type=prod, instance=env-286990laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.579702262Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=37bf38878f4c6dc52f15d424eeb86e360ed3466621592639ac8de9dbd078cb79" t=2024-05-29T13:44:14.579701072Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.579626548Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm3, exported_orgunit=TELE, fan=5, host=vm3-ilo.telecomx.dk, host_short=vm3-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=vm, type=health" t=2024-05-29T13:44:14.579666975Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=698963 slug=lemonade version=4 
fingerprint=33e403c2e784ca09 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.579506636Z level=debug msg="Alert rule evaluated" results="[{Instance:app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-4lr59 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-4lr59 Value:0xc039ab5aa8} THRESHOLD:{Var:THRESHOLD Labels:app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-4lr59 Value:0xc039ab5ae0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.579116586s EvaluationString:[ var='QUERY' labels={app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-4lr59} value=0 ], [ var='THRESHOLD' labels={app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-4lr59} value=0 ]} {Instance:app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-nqwmz State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-nqwmz Value:0xc039ab5b58} THRESHOLD:{Var:THRESHOLD Labels:app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-nqwmz Value:0xc039ab5b80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.579129689s EvaluationString:[ var='QUERY' labels={app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-nqwmz} value=0 ], [ var='THRESHOLD' labels={app=growth-event-worker-worker, pod=growth-event-worker-worker-5cdd9f4798-nqwmz} value=0 ]}]" duration=43.433088ms + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:14.579529801Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.579506178Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yatb0m5r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579528218Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377" t=2024-05-29T13:44:14.579452502Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.579460062Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm3, exported_orgunit=TELE, fan=2, host=vm3-ilo.telecomx.dk, host_short=vm3-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=vm, type=health" t=2024-05-29T13:44:14.579423371Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yalpph9l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579305995Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm2, exported_orgunit=IPTV, fan=6, host=vm2-ilo.powernet.tv, host_short=vm2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.57934497Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=26c621c596c7b3c155925879381073f85f411fa3ef9342c3e4bbb418e7ffc721" t=2024-05-29T13:44:14.579331529Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.579339919Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.579244758Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yalpph9l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579249265Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.577697391Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm2, exported_orgunit=IPTV, fan=4, host=vm2-ilo.powernet.tv, host_short=vm2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.579203468Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=1fcfba4921bfcba0b370db2ce21919183761cb31c7ad649f2ad408ffb3d9dc62" t=2024-05-29T13:44:14.579210324Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yajr0iex-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579161484Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yajr0iex-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579133023Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yajr0iex-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579093273Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm2, exported_orgunit=IPTV, fan=3, host=vm2-ilo.powernet.tv, host_short=vm2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.579112867Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yajr0iex-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579063393Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm2, exported_orgunit=IPTV, fan=2, host=vm2-ilo.powernet.tv, host_short=vm2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.579040766Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=1d6a915ffc629be484b9d3b9e8394818512e9a3667e30f58cca61ee001e27c40" t=2024-05-29T13:44:14.579093611Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yai5hppw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.579019982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm2, exported_orgunit=IPTV, fan=2, host=vm2-ilo.powernet.tv, host_short=vm2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.579028365Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy 
instance="__name__=mstr_status_modeling_service, agent_hostname=env-284881laio2centralus, cloud_platform=Azure, customer_id=A151, env_id=284881, env_name=A151_Digikey_Prod_mstrbak, env_type=prod, instance=env-284881laio2centralus, job=integrations/node_exporter, region=centralus, stage=live" t=2024-05-29T13:44:14.57898227Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm2, exported_orgunit=IPTV, fan=1, host=vm2-ilo.powernet.tv, host_short=vm2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.578963964Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yai5hppw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.578860851Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yagxlh0r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.578740419Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=192674b46156d31fe0b0078e659fd37158e2b1e00fd05e307061a15b665f0be6" t=2024-05-29T13:44:14.578760837Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-284881laio1centralus, cloud_platform=Azure, customer_id=A151, env_id=284881, env_name=A151_Digikey_Prod_mstrbak, env_type=prod, instance=env-284881laio1centralus, job=integrations/node_exporter, region=centralus, stage=live" t=2024-05-29T13:44:14.578768108Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.578678175Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.578643219Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.578663302Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=184b7943d08446d0e9e4175d95c25437c6a418c4bc2ba4d498ba49b87084a498" t=2024-05-29T13:44:14.578653788Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.578588579Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager 
user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm1, exported_orgunit=IPTV, fan=3, host=vm1-ilo.powernet.tv, host_short=vm1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.578543458Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yagnhmfx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.578512017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=128cd9f9fa757555a7995880bc35c73d92ce0379401ad25b50c97737a1797c21" t=2024-05-29T13:44:14.57853009Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm1, exported_orgunit=IPTV, fan=2, host=vm1-ilo.powernet.tv, host_short=vm1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.578436356Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yafufcox-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.578410226Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-yafufcox-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.578293895Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-283878laionortheurope, cloud_platform=Azure, customer_id=A164, env_id=283878, env_name=A164_CTTI_Corp_PROD, env_type=prod, instance=env-283878laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:14.578271467Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ya4l0vy6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.578166494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=201644 slug=thoughtspot instance= 
t=2024-05-29T13:44:14.578126157Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=201644 slug=thoughtspot instance= t=2024-05-29T13:44:14.578119176Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=vm1, exported_orgunit=IPTV, fan=0, host=vm1-ilo.powernet.tv, host_short=vm1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, type=health" t=2024-05-29T13:44:14.578147852Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-283735laio2eastus, cloud_platform=Azure, customer_id=A128, env_id=283735, env_name=A128_qvc_Prod, env_type=prod, instance=env-283735laio2eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.578107143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=201644 slug=thoughtspot t=2024-05-29T13:44:14.578104092Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=0e7dcfa4ff84bb066158d11f3f661395366372cc799cab35f7fe358d585e3bad" t=2024-05-29T13:44:14.578130014Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-283735laio2eastus, cloud_platform=Azure, customer_id=A128, env_id=283735, env_name=A128_qvc_Prod, env_type=prod, instance=env-283735laio2eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.578091354Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr6, exported_orgunit=IPTV, fan=6, host=tr6-ilo.powernet.tv, host_short=tr6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=transcoder, type=health" t=2024-05-29T13:44:14.57803985Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ya3bibv8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.577879011Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ya3bibv8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57786314Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr6, exported_orgunit=IPTV, 
fan=5, host=tr6-ilo.powernet.tv, host_short=tr6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=transcoder, type=health" t=2024-05-29T13:44:14.577946649Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9xe29jb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57779417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=074c93d282c15d0d164df9be75b071921adac5b5a1bf682d4eb89c0f2bb46fd5" t=2024-05-29T13:44:14.577878614Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.577752922Z caller=remote_instance_store.go:51 user=130276 slug=devops8 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424" t=2024-05-29T13:44:14.57777405Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr6, exported_orgunit=IPTV, fan=3, host=tr6-ilo.powernet.tv, host_short=tr6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=transcoder, type=health" t=2024-05-29T13:44:14.577763446Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr6, exported_orgunit=IPTV, fan=3, host=tr6-ilo.powernet.tv, host_short=tr6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=transcoder, type=health" t=2024-05-29T13:44:14.577746846Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=03f4a206b4472e88405ca56a89ea1a8dc40025eb12b5b6b39ae3bc25fab90924" t=2024-05-29T13:44:14.577668965Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.577560606Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9ws0r0h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.577609798Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=716600 slug=microntechnology version=1 fingerprint=c1092db1228cdc86 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.577472944Z level=debug msg="Alert rule evaluated" 
results="[{Instance:__name__=windows_service_status, agent_hostname=MC-Prod-App01, instance=MC-Prod-App01:12345, job=integrations/windows_exporter, name=rabbitmq, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=MC-Prod-App01, instance=MC-Prod-App01:12345, job=integrations/windows_exporter, name=rabbitmq, status=ok Value:0xc003814a18} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=MC-Prod-App01, instance=MC-Prod-App01:12345, job=integrations/windows_exporter, name=rabbitmq, status=ok Value:0xc003814a68} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=MC-Prod-App01, instance=MC-Prod-App01:12345, job=integrations/windows_exporter, name=rabbitmq, status=ok Value:0xc0038149c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.577170652s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=MC-Prod-App01, instance=MC-Prod-App01:12345, job=integrations/windows_exporter, name=rabbitmq, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=MC-Prod-App01, instance=MC-Prod-App01:12345, job=integrations/windows_exporter, name=rabbitmq, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=MC-Prod-App01, instance=MC-Prod-App01:12345, job=integrations/windows_exporter, name=rabbitmq, status=ok} value=0 ]}]" duration=6.956439ms + level=debug ts=2024-05-29T13:44:14.577523568Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=02f6de7f735a80b5625c523e201398d730930d1fce61b9bd49a779fd3933ce69" t=2024-05-29T13:44:14.57755383Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.577480016Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112387 slug=lucidhq instance= t=2024-05-29T13:44:14.577471117Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9t1ecdb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.577427366Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="instance=10.100.40.95:10250, name=019a08e463181c5d2d3270acbc0947bda7c909381ad8e92ef49f45aacceea7f0" t=2024-05-29T13:44:14.57739174Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.577159191Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.57742504Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=cdm8p3tewy0owf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.573092211Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, 
exported_localname=tr5, exported_orgunit=IPTV, fan=6, host=tr5-ilo.powernet.tv, host_short=tr5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.57736354Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=938012 slug=mywifinetworks instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.577367464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr5, exported_orgunit=IPTV, fan=5, host=tr5-ilo.powernet.tv, host_short=tr5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.577283939Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9ko7ul7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.577304135Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr5, exported_orgunit=IPTV, fan=4, host=tr5-ilo.powernet.tv, host_short=tr5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.577211238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9ko7ul7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.577273344Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.577247766Z caller=remote_instance_store.go:51 user=625813 slug=all2energy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=938012 slug=mywifinetworks t=2024-05-29T13:44:14.577253763Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.577223776Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=309009 slug=elestyle version=1 fingerprint=45393568ab3c8ec8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.575163425Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=10.100.40.95:10250, name=019a08e463181c5d2d3270acbc0947bda7c909381ad8e92ef49f45aacceea7f0 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=019a08e463181c5d2d3270acbc0947bda7c909381ad8e92ef49f45aacceea7f0 Value:0xc023518d90} C:{Var:C Labels:instance=10.100.40.95:10250, name=019a08e463181c5d2d3270acbc0947bda7c909381ad8e92ef49f45aacceea7f0 Value:0xc023518d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.570636329s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=019a08e463181c5d2d3270acbc0947bda7c909381ad8e92ef49f45aacceea7f0} value=0.04986509583356261 ], [ var='C' labels={instance=10.100.40.95:10250, name=019a08e463181c5d2d3270acbc0947bda7c909381ad8e92ef49f45aacceea7f0} value=1 ]} {Instance:instance=10.100.40.95:10250, name=02f6de7f735a80b5625c523e201398d730930d1fce61b9bd49a779fd3933ce69 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=02f6de7f735a80b5625c523e201398d730930d1fce61b9bd49a779fd3933ce69 Value:0xc023518dd8} C:{Var:C Labels:instance=10.100.40.95:10250, name=02f6de7f735a80b5625c523e201398d730930d1fce61b9bd49a779fd3933ce69 Value:0xc023518dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570646847s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=02f6de7f735a80b5625c523e201398d730930d1fce61b9bd49a779fd3933ce69} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=02f6de7f735a80b5625c523e201398d730930d1fce61b9bd49a779fd3933ce69} value=1 ]} {Instance:instance=10.100.40.95:10250, name=03f4a206b4472e88405ca56a89ea1a8dc40025eb12b5b6b39ae3bc25fab90924 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=03f4a206b4472e88405ca56a89ea1a8dc40025eb12b5b6b39ae3bc25fab90924 Value:0xc023518e20} C:{Var:C Labels:instance=10.100.40.95:10250, name=03f4a206b4472e88405ca56a89ea1a8dc40025eb12b5b6b39ae3bc25fab90924 Value:0xc023518e08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570650524s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=03f4a206b4472e88405ca56a89ea1a8dc40025eb12b5b6b39ae3bc25fab90924} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=03f4a206b4472e88405ca56a89ea1a8dc40025eb12b5b6b39ae3bc25fab90924} value=1 ]} {Instance:instance=10.100.40.95:10250, name=055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424 Value:0xc023518e68} C:{Var:C Labels:instance=10.100.40.95:10250, name=055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424 Value:0xc023518e50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570653654s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424} value=0.015472354999999846 ], [ var='C' labels={instance=10.100.40.95:10250, name=055761c185b4e044fc26c92bcd39cdc6458f43da9f8c17bc836a7f329ddce424} value=1 ]} {Instance:instance=10.100.40.95:10250, name=074c93d282c15d0d164df9be75b071921adac5b5a1bf682d4eb89c0f2bb46fd5 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=074c93d282c15d0d164df9be75b071921adac5b5a1bf682d4eb89c0f2bb46fd5 Value:0xc023518e98} C:{Var:C Labels:instance=10.100.40.95:10250, name=074c93d282c15d0d164df9be75b071921adac5b5a1bf682d4eb89c0f2bb46fd5 Value:0xc023518eb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570656838s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=074c93d282c15d0d164df9be75b071921adac5b5a1bf682d4eb89c0f2bb46fd5} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=074c93d282c15d0d164df9be75b071921adac5b5a1bf682d4eb89c0f2bb46fd5} value=1 ]} {Instance:instance=10.100.40.95:10250, 
name=078609f826611265000c07636e844e89254765de5c17e7f32a511a0c52bc3799 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=078609f826611265000c07636e844e89254765de5c17e7f32a511a0c52bc3799 Value:0xc023518ef8} C:{Var:C Labels:instance=10.100.40.95:10250, name=078609f826611265000c07636e844e89254765de5c17e7f32a511a0c52bc3799 Value:0xc023518ee0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570660525s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=078609f826611265000c07636e844e89254765de5c17e7f32a511a0c52bc3799} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=078609f826611265000c07636e844e89254765de5c17e7f32a511a0c52bc3799} value=1 ]} {Instance:instance=10.100.40.95:10250, name=0e7dcfa4ff84bb066158d11f3f661395366372cc799cab35f7fe358d585e3bad State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=0e7dcfa4ff84bb066158d11f3f661395366372cc799cab35f7fe358d585e3bad Value:0xc023518f38} C:{Var:C Labels:instance=10.100.40.95:10250, name=0e7dcfa4ff84bb066158d11f3f661395366372cc799cab35f7fe358d585e3bad Value:0xc023518f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570663038s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=0e7dcfa4ff84bb066158d11f3f661395366372cc799cab35f7fe358d585e3bad} value=0.2772945666659628 ], [ var='C' labels={instance=10.100.40.95:10250, name=0e7dcfa4ff84bb066158d11f3f661395366372cc799cab35f7fe358d585e3bad} value=1 ]} {Instance:instance=10.100.40.95:10250, name=0f7d856c340c6a9671566e93628946a27901937d09397de963cd58b8329bcf56 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=0f7d856c340c6a9671566e93628946a27901937d09397de963cd58b8329bcf56 Value:0xc023518fb0} C:{Var:C Labels:instance=10.100.40.95:10250, name=0f7d856c340c6a9671566e93628946a27901937d09397de963cd58b8329bcf56 Value:0xc023518fc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570678806s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=0f7d856c340c6a9671566e93628946a27901937d09397de963cd58b8329bcf56} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=0f7d856c340c6a9671566e93628946a27901937d09397de963cd58b8329bcf56} value=1 ]} {Instance:instance=10.100.40.95:10250, name=105347a933a438964a9014ac2267b977afbb3f964cf123df119896a1ef38fc10 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=105347a933a438964a9014ac2267b977afbb3f964cf123df119896a1ef38fc10 Value:0xc023518ff8} C:{Var:C Labels:instance=10.100.40.95:10250, name=105347a933a438964a9014ac2267b977afbb3f964cf123df119896a1ef38fc10 Value:0xc023519010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570683071s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=105347a933a438964a9014ac2267b977afbb3f964cf123df119896a1ef38fc10} value=0.5470898082499313 ], [ var='C' labels={instance=10.100.40.95:10250, name=105347a933a438964a9014ac2267b977afbb3f964cf123df119896a1ef38fc10} value=1 ]} {Instance:instance=10.100.40.95:10250, name=128cd9f9fa757555a7995880bc35c73d92ce0379401ad25b50c97737a1797c21 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=128cd9f9fa757555a7995880bc35c73d92ce0379401ad25b50c97737a1797c21 Value:0xc023519040} C:{Var:C Labels:instance=10.100.40.95:10250, name=128cd9f9fa757555a7995880bc35c73d92ce0379401ad25b50c97737a1797c21 Value:0xc023519058}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:4.570689272s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=128cd9f9fa757555a7995880bc35c73d92ce0379401ad25b50c97737a1797c21} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=128cd9f9fa757555a7995880bc35c73d92ce0379401ad25b50c97737a1797c21} value=1 ]} {Instance:instance=10.100.40.95:10250, name=184b7943d08446d0e9e4175d95c25437c6a418c4bc2ba4d498ba49b87084a498 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=184b7943d08446d0e9e4175d95c25437c6a418c4bc2ba4d498ba49b87084a498 Value:0xc023519088} C:{Var:C Labels:instance=10.100.40.95:10250, name=184b7943d08446d0e9e4175d95c25437c6a418c4bc2ba4d498ba49b87084a498 Value:0xc0235190a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570695134s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=184b7943d08446d0e9e4175d95c25437c6a418c4bc2ba4d498ba49b87084a498} value=0.05076328000010714 ], [ var='C' labels={instance=10.100.40.95:10250, name=184b7943d08446d0e9e4175d95c25437c6a418c4bc2ba4d498ba49b87084a498} value=1 ]} {Instance:instance=10.100.40.95:10250, name=192674b46156d31fe0b0078e659fd37158e2b1e00fd05e307061a15b665f0be6 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=192674b46156d31fe0b0078e659fd37158e2b1e00fd05e307061a15b665f0be6 Value:0xc0235190d0} C:{Var:C Labels:instance=10.100.40.95:10250, name=192674b46156d31fe0b0078e659fd37158e2b1e00fd05e307061a15b665f0be6 Value:0xc0235190e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570700648s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=192674b46156d31fe0b0078e659fd37158e2b1e00fd05e307061a15b665f0be6} value=0.026173683333468034 ], [ var='C' labels={instance=10.100.40.95:10250, name=192674b46156d31fe0b0078e659fd37158e2b1e00fd05e307061a15b665f0be6} value=1 ]} {Instance:instance=10.100.40.95:10250, name=19dd181eb3f358fadaf2f26b4ed1a0b926e3ffb1b1d9153d92792a964029aa71 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=19dd181eb3f358fadaf2f26b4ed1a0b926e3ffb1b1d9153d92792a964029aa71 Value:0xc023519168} C:{Var:C Labels:instance=10.100.40.95:10250, name=19dd181eb3f358fadaf2f26b4ed1a0b926e3ffb1b1d9153d92792a964029aa71 Value:0xc023519190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570704021s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=19dd181eb3f358fadaf2f26b4ed1a0b926e3ffb1b1d9153d92792a964029aa71} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=19dd181eb3f358fadaf2f26b4ed1a0b926e3ffb1b1d9153d92792a964029aa71} value=1 ]} {Instance:instance=10.100.40.95:10250, name=1ba103365b5ec3b8509842c306e2782e9edee9afb2a97c2726e8533696a0e547 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=1ba103365b5ec3b8509842c306e2782e9edee9afb2a97c2726e8533696a0e547 Value:0xc0235191c0} C:{Var:C Labels:instance=10.100.40.95:10250, name=1ba103365b5ec3b8509842c306e2782e9edee9afb2a97c2726e8533696a0e547 Value:0xc0235191d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570707486s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=1ba103365b5ec3b8509842c306e2782e9edee9afb2a97c2726e8533696a0e547} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=1ba103365b5ec3b8509842c306e2782e9edee9afb2a97c2726e8533696a0e547} value=1 ]} {Instance:instance=10.100.40.95:10250, 
name=1d6a915ffc629be484b9d3b9e8394818512e9a3667e30f58cca61ee001e27c40 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=1d6a915ffc629be484b9d3b9e8394818512e9a3667e30f58cca61ee001e27c40 Value:0xc023519208} C:{Var:C Labels:instance=10.100.40.95:10250, name=1d6a915ffc629be484b9d3b9e8394818512e9a3667e30f58cca61ee001e27c40 Value:0xc023519220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570712301s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=1d6a915ffc629be484b9d3b9e8394818512e9a3667e30f58cca61ee001e27c40} value=0.06554719824999702 ], [ var='C' labels={instance=10.100.40.95:10250, name=1d6a915ffc629be484b9d3b9e8394818512e9a3667e30f58cca61ee001e27c40} value=1 ]} {Instance:instance=10.100.40.95:10250, name=1fcfba4921bfcba0b370db2ce21919183761cb31c7ad649f2ad408ffb3d9dc62 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=1fcfba4921bfcba0b370db2ce21919183761cb31c7ad649f2ad408ffb3d9dc62 Value:0xc023519250} C:{Var:C Labels:instance=10.100.40.95:10250, name=1fcfba4921bfcba0b370db2ce21919183761cb31c7ad649f2ad408ffb3d9dc62 Value:0xc023519268}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570715782s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=1fcfba4921bfcba0b370db2ce21919183761cb31c7ad649f2ad408ffb3d9dc62} value=0.10509888333331217 ], [ var='C' labels={instance=10.100.40.95:10250, name=1fcfba4921bfcba0b370db2ce21919183761cb31c7ad649f2ad408ffb3d9dc62} value=1 ]} {Instance:instance=10.100.40.95:10250, name=26c621c596c7b3c155925879381073f85f411fa3ef9342c3e4bbb418e7ffc721 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=26c621c596c7b3c155925879381073f85f411fa3ef9342c3e4bbb418e7ffc721 Value:0xc023519298} C:{Var:C Labels:instance=10.100.40.95:10250, name=26c621c596c7b3c155925879381073f85f411fa3ef9342c3e4bbb418e7ffc721 Value:0xc0235192b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570718908s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=26c621c596c7b3c155925879381073f85f411fa3ef9342c3e4bbb418e7ffc721} value=0.13032396666668924 ], [ var='C' labels={instance=10.100.40.95:10250, name=26c621c596c7b3c155925879381073f85f411fa3ef9342c3e4bbb418e7ffc721} value=1 ]} {Instance:instance=10.100.40.95:10250, name=2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377 Value:0xc0235192e0} C:{Var:C Labels:instance=10.100.40.95:10250, name=2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377 Value:0xc0235192f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570721493s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=2b4ae5e67d643202493be73913b5323f9d33a28c81b2e2ad0d51bf85cb635377} value=1 ]} {Instance:instance=10.100.40.95:10250, name=2d388a7ad207086ea5bb357992e701113909dc4e6c1502ab58444c9d4df6aadc State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=2d388a7ad207086ea5bb357992e701113909dc4e6c1502ab58444c9d4df6aadc Value:0xc023519328} C:{Var:C Labels:instance=10.100.40.95:10250, name=2d388a7ad207086ea5bb357992e701113909dc4e6c1502ab58444c9d4df6aadc Value:0xc023519340}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570723891s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=2d388a7ad207086ea5bb357992e701113909dc4e6c1502ab58444c9d4df6aadc} value=0.04675287741671733 ], [ var='C' labels={instance=10.100.40.95:10250, name=2d388a7ad207086ea5bb357992e701113909dc4e6c1502ab58444c9d4df6aadc} value=1 ]} {Instance:instance=10.100.40.95:10250, name=37bf38878f4c6dc52f15d424eeb86e360ed3466621592639ac8de9dbd078cb79 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=37bf38878f4c6dc52f15d424eeb86e360ed3466621592639ac8de9dbd078cb79 Value:0xc0235193b8} C:{Var:C Labels:instance=10.100.40.95:10250, name=37bf38878f4c6dc52f15d424eeb86e360ed3466621592639ac8de9dbd078cb79 Value:0xc0235193a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570729554s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=37bf38878f4c6dc52f15d424eeb86e360ed3466621592639ac8de9dbd078cb79} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=37bf38878f4c6dc52f15d424eeb86e360ed3466621592639ac8de9dbd078cb79} value=1 ]} {Instance:instance=10.100.40.95:10250, name=3a8dc664364854a52bb923d680106264c817212f2d7306f1e2dd772f946b7329 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=3a8dc664364854a52bb923d680106264c817212f2d7306f1e2dd772f946b7329 Value:0xc023519410} C:{Var:C Labels:instance=10.100.40.95:10250, name=3a8dc664364854a52bb923d680106264c817212f2d7306f1e2dd772f946b7329 Value:0xc0235193f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570731998s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=3a8dc664364854a52bb923d680106264c817212f2d7306f1e2dd772f946b7329} value=0.0015855050000013193 ], [ var='C' labels={instance=10.100.40.95:10250, name=3a8dc664364854a52bb923d680106264c817212f2d7306f1e2dd772f946b7329} value=1 ]} {Instance:instance=10.100.40.95:10250, name=41a102ee21c3d1d3a94d02f5fcdba4d988fdfcb5e8b062148168f99f871bd861 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=41a102ee21c3d1d3a94d02f5fcdba4d988fdfcb5e8b062148168f99f871bd861 Value:0xc023519440} C:{Var:C Labels:instance=10.100.40.95:10250, name=41a102ee21c3d1d3a94d02f5fcdba4d988fdfcb5e8b062148168f99f871bd861 Value:0xc023519458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570734777s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=41a102ee21c3d1d3a94d02f5fcdba4d988fdfcb5e8b062148168f99f871bd861} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=41a102ee21c3d1d3a94d02f5fcdba4d988fdfcb5e8b062148168f99f871bd861} value=1 ]} {Instance:instance=10.100.40.95:10250, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f Value:0xc023519488} C:{Var:C Labels:instance=10.100.40.95:10250, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f Value:0xc0235194a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570736958s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f} value=0.25633549999990163 ], [ var='C' labels={instance=10.100.40.95:10250, name=454eb7a3cecbcc41bf1f464c109dfde1c1d0ea6039d8d08fa731034bc0288f6f} value=1 ]} {Instance:instance=10.100.40.95:10250, 
name=5d0b5617d1cdc9d3e0c45f0bf01ba504e06b9e04d9272876c702dae749f08873 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=5d0b5617d1cdc9d3e0c45f0bf01ba504e06b9e04d9272876c702dae749f08873 Value:0xc0235194e8} C:{Var:C Labels:instance=10.100.40.95:10250, name=5d0b5617d1cdc9d3e0c45f0bf01ba504e06b9e04d9272876c702dae749f08873 Value:0xc0235194d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570739576s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=5d0b5617d1cdc9d3e0c45f0bf01ba504e06b9e04d9272876c702dae749f08873} value=0.29561829833331404 ], [ var='C' labels={instance=10.100.40.95:10250, name=5d0b5617d1cdc9d3e0c45f0bf01ba504e06b9e04d9272876c702dae749f08873} value=1 ]} {Instance:instance=10.100.40.95:10250, name=6880438a24012dec80bb1d10c3f90167933bf76f8864cad585b2927911c9e2c0 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=6880438a24012dec80bb1d10c3f90167933bf76f8864cad585b2927911c9e2c0 Value:0xc023519518} C:{Var:C Labels:instance=10.100.40.95:10250, name=6880438a24012dec80bb1d10c3f90167933bf76f8864cad585b2927911c9e2c0 Value:0xc023519530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570742474s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=6880438a24012dec80bb1d10c3f90167933bf76f8864cad585b2927911c9e2c0} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=6880438a24012dec80bb1d10c3f90167933bf76f8864cad585b2927911c9e2c0} value=1 ]} {Instance:instance=10.100.40.95:10250, name=6b1e0fef2bfb006bce1da6f3536a525375dff20bf9dc941b2fbaed9d3e733921 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=6b1e0fef2bfb006bce1da6f3536a525375dff20bf9dc941b2fbaed9d3e733921 Value:0xc023519570} C:{Var:C Labels:instance=10.100.40.95:10250, name=6b1e0fef2bfb006bce1da6f3536a525375dff20bf9dc941b2fbaed9d3e733921 Value:0xc023519588}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570745036s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=6b1e0fef2bfb006bce1da6f3536a525375dff20bf9dc941b2fbaed9d3e733921} value=0.02536800166656879 ], [ var='C' labels={instance=10.100.40.95:10250, name=6b1e0fef2bfb006bce1da6f3536a525375dff20bf9dc941b2fbaed9d3e733921} value=1 ]} {Instance:instance=10.100.40.95:10250, name=6e8317c226b5c9b382060344a190e75d6fe97d14f8657fe10846b2b6287065fb State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=6e8317c226b5c9b382060344a190e75d6fe97d14f8657fe10846b2b6287065fb Value:0xc0235195b8} C:{Var:C Labels:instance=10.100.40.95:10250, name=6e8317c226b5c9b382060344a190e75d6fe97d14f8657fe10846b2b6287065fb Value:0xc0235195e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570747464s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=6e8317c226b5c9b382060344a190e75d6fe97d14f8657fe10846b2b6287065fb} value=0.06482084749980761 ], [ var='C' labels={instance=10.100.40.95:10250, name=6e8317c226b5c9b382060344a190e75d6fe97d14f8657fe10846b2b6287065fb} value=1 ]} {Instance:instance=10.100.40.95:10250, name=77d58b7d52405f4d300214b45e192d3772701d4f849c77637310b5a80ac3345b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=77d58b7d52405f4d300214b45e192d3772701d4f849c77637310b5a80ac3345b Value:0xc023519610} C:{Var:C Labels:instance=10.100.40.95:10250, name=77d58b7d52405f4d300214b45e192d3772701d4f849c77637310b5a80ac3345b Value:0xc023519628}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570750152s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=77d58b7d52405f4d300214b45e192d3772701d4f849c77637310b5a80ac3345b} value=2.306928608328841 ], [ var='C' labels={instance=10.100.40.95:10250, name=77d58b7d52405f4d300214b45e192d3772701d4f849c77637310b5a80ac3345b} value=1 ]} {Instance:instance=10.100.40.95:10250, name=78e05a0a639c8f7d9946fb20610daf998c26fdb778934e0a8c56376711b9d50b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=78e05a0a639c8f7d9946fb20610daf998c26fdb778934e0a8c56376711b9d50b Value:0xc023519658} C:{Var:C Labels:instance=10.100.40.95:10250, name=78e05a0a639c8f7d9946fb20610daf998c26fdb778934e0a8c56376711b9d50b Value:0xc023519670}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570752926s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=78e05a0a639c8f7d9946fb20610daf998c26fdb778934e0a8c56376711b9d50b} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=78e05a0a639c8f7d9946fb20610daf998c26fdb778934e0a8c56376711b9d50b} value=1 ]} {Instance:instance=10.100.40.95:10250, name=79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294 Value:0xc0235196b8} C:{Var:C Labels:instance=10.100.40.95:10250, name=79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294 Value:0xc0235196a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570755918s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=79638611dc0f59d461a32472a5de768814c5b502591e2de1e6c2bd244139a294} value=1 ]} {Instance:instance=10.100.40.95:10250, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2 Value:0xc023519700} C:{Var:C Labels:instance=10.100.40.95:10250, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2 Value:0xc0235196e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570758168s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=8535a76db21c5d87367b7fed0da6bd1a3af42321f1561c2ca16b38109fe260b2} value=1 ]} {Instance:instance=10.100.40.95:10250, name=88ad6ccf1c42009daedc69002189927574c3a80b5abe9d030fad39e232a44a8b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=88ad6ccf1c42009daedc69002189927574c3a80b5abe9d030fad39e232a44a8b Value:0xc023519730} C:{Var:C Labels:instance=10.100.40.95:10250, name=88ad6ccf1c42009daedc69002189927574c3a80b5abe9d030fad39e232a44a8b Value:0xc023519758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570760348s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=88ad6ccf1c42009daedc69002189927574c3a80b5abe9d030fad39e232a44a8b} value=0.33894980833338195 ], [ var='C' labels={instance=10.100.40.95:10250, name=88ad6ccf1c42009daedc69002189927574c3a80b5abe9d030fad39e232a44a8b} value=1 ]} {Instance:instance=10.100.40.95:10250, 
name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f Value:0xc023519788} C:{Var:C Labels:instance=10.100.40.95:10250, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f Value:0xc0235197a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570764284s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=8eda53275a01fd8e4cc3804dc0b829e3ec37d3912ae9e3c33c7acf7f64c9698f} value=1 ]} {Instance:instance=10.100.40.95:10250, name=9a0258cc073d447b5c994c802f3d866a69ca2e42b9015833d51a5fa067e122ab State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=9a0258cc073d447b5c994c802f3d866a69ca2e42b9015833d51a5fa067e122ab Value:0xc0235197f8} C:{Var:C Labels:instance=10.100.40.95:10250, name=9a0258cc073d447b5c994c802f3d866a69ca2e42b9015833d51a5fa067e122ab Value:0xc0235197e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570766467s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=9a0258cc073d447b5c994c802f3d866a69ca2e42b9015833d51a5fa067e122ab} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=9a0258cc073d447b5c994c802f3d866a69ca2e42b9015833d51a5fa067e122ab} value=1 ]} {Instance:instance=10.100.40.95:10250, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1 Value:0xc023519828} C:{Var:C Labels:instance=10.100.40.95:10250, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1 Value:0xc023519840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570769111s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1} value=0.013035967416665054 ], [ var='C' labels={instance=10.100.40.95:10250, name=9c92ef4088cd9967569c9a8d71763b51fc8b59617bba232140d9117b4e7e67f1} value=1 ]} {Instance:instance=10.100.40.95:10250, name=a6ba2bb23f82970b95ea0fe9e7f3c5ebae04de7b929573aaf6f4caa983ee3d32 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=a6ba2bb23f82970b95ea0fe9e7f3c5ebae04de7b929573aaf6f4caa983ee3d32 Value:0xc023519870} C:{Var:C Labels:instance=10.100.40.95:10250, name=a6ba2bb23f82970b95ea0fe9e7f3c5ebae04de7b929573aaf6f4caa983ee3d32 Value:0xc023519888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570771692s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=a6ba2bb23f82970b95ea0fe9e7f3c5ebae04de7b929573aaf6f4caa983ee3d32} value=0.013866560749988821 ], [ var='C' labels={instance=10.100.40.95:10250, name=a6ba2bb23f82970b95ea0fe9e7f3c5ebae04de7b929573aaf6f4caa983ee3d32} value=1 ]} {Instance:instance=10.100.40.95:10250, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96 Value:0xc0235198b8} C:{Var:C Labels:instance=10.100.40.95:10250, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96 Value:0xc0235198e0}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:4.570773976s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=a7b866f62c3f2f292512584add68ee582b1f5f93728f5a51615de1b48cd99c96} value=1 ]} {Instance:instance=10.100.40.95:10250, name=acc20f0a1743d0c55d5b69aa13de1fe2f683dbb754e24f0e9512813cf0ddcffb State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=acc20f0a1743d0c55d5b69aa13de1fe2f683dbb754e24f0e9512813cf0ddcffb Value:0xc023519938} C:{Var:C Labels:instance=10.100.40.95:10250, name=acc20f0a1743d0c55d5b69aa13de1fe2f683dbb754e24f0e9512813cf0ddcffb Value:0xc023519920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570776151s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=acc20f0a1743d0c55d5b69aa13de1fe2f683dbb754e24f0e9512813cf0ddcffb} value=0.06806293333321871 ], [ var='C' labels={instance=10.100.40.95:10250, name=acc20f0a1743d0c55d5b69aa13de1fe2f683dbb754e24f0e9512813cf0ddcffb} value=1 ]} {Instance:instance=10.100.40.95:10250, name=b110495bcde38fa3c2917fde48692a943c8c156b828ee6d8e5429f464af42aea State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=b110495bcde38fa3c2917fde48692a943c8c156b828ee6d8e5429f464af42aea Value:0xc023519980} C:{Var:C Labels:instance=10.100.40.95:10250, name=b110495bcde38fa3c2917fde48692a943c8c156b828ee6d8e5429f464af42aea Value:0xc023519968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570779708s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=b110495bcde38fa3c2917fde48692a943c8c156b828ee6d8e5429f464af42aea} value=0.10916323749976678 ], [ var='C' labels={instance=10.100.40.95:10250, name=b110495bcde38fa3c2917fde48692a943c8c156b828ee6d8e5429f464af42aea} value=1 ]} {Instance:instance=10.100.40.95:10250, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927 Value:0xc0235199c0} C:{Var:C Labels:instance=10.100.40.95:10250, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927 Value:0xc0235199e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570782088s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927} value=0.015332026583365634 ], [ var='C' labels={instance=10.100.40.95:10250, name=b7bf2f80e41530704310ca2ce3e0f5aa72a63eb2b4bb1f0925fa298000335927} value=1 ]} {Instance:instance=10.100.40.95:10250, name=b7e271013440aab1050db311be3defc1c1ddc566f25f84348b1a7a2d908f7e84 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=b7e271013440aab1050db311be3defc1c1ddc566f25f84348b1a7a2d908f7e84 Value:0xc023519a28} C:{Var:C Labels:instance=10.100.40.95:10250, name=b7e271013440aab1050db311be3defc1c1ddc566f25f84348b1a7a2d908f7e84 Value:0xc023519a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570784367s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=b7e271013440aab1050db311be3defc1c1ddc566f25f84348b1a7a2d908f7e84} value=0.04309760833336895 ], [ var='C' labels={instance=10.100.40.95:10250, name=b7e271013440aab1050db311be3defc1c1ddc566f25f84348b1a7a2d908f7e84} value=1 ]} {Instance:instance=10.100.40.95:10250, 
name=bf01eb05706777fe1a2042fda79f1a7fb1c3d7312fe753d12a37d66ba710f56e State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=bf01eb05706777fe1a2042fda79f1a7fb1c3d7312fe753d12a37d66ba710f56e Value:0xc023519a70} C:{Var:C Labels:instance=10.100.40.95:10250, name=bf01eb05706777fe1a2042fda79f1a7fb1c3d7312fe753d12a37d66ba710f56e Value:0xc023519a88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570786789s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=bf01eb05706777fe1a2042fda79f1a7fb1c3d7312fe753d12a37d66ba710f56e} value=0.012513284166667898 ], [ var='C' labels={instance=10.100.40.95:10250, name=bf01eb05706777fe1a2042fda79f1a7fb1c3d7312fe753d12a37d66ba710f56e} value=1 ]} {Instance:instance=10.100.40.95:10250, name=c2f8c55042bebff96a026d81d1c7e3150c0ffc71d19f0ff60c53bc50470d4704 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=c2f8c55042bebff96a026d81d1c7e3150c0ffc71d19f0ff60c53bc50470d4704 Value:0xc023519ab8} C:{Var:C Labels:instance=10.100.40.95:10250, name=c2f8c55042bebff96a026d81d1c7e3150c0ffc71d19f0ff60c53bc50470d4704 Value:0xc023519ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570789616s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=c2f8c55042bebff96a026d81d1c7e3150c0ffc71d19f0ff60c53bc50470d4704} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=c2f8c55042bebff96a026d81d1c7e3150c0ffc71d19f0ff60c53bc50470d4704} value=1 ]} {Instance:instance=10.100.40.95:10250, name=cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499 Value:0xc023519b00} C:{Var:C Labels:instance=10.100.40.95:10250, name=cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499 Value:0xc023519b18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570793378s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=cb11a3c96346e32d65cdfc90ad24ce5bdf207d2c1247ad2e898dda3f8cfc1499} value=1 ]} {Instance:instance=10.100.40.95:10250, name=d1566308bdbe6365bf235cc496bb2fe5a84ade50f98444222c1cfebac3fac032 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=d1566308bdbe6365bf235cc496bb2fe5a84ade50f98444222c1cfebac3fac032 Value:0xc023519b60} C:{Var:C Labels:instance=10.100.40.95:10250, name=d1566308bdbe6365bf235cc496bb2fe5a84ade50f98444222c1cfebac3fac032 Value:0xc023519b48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570795523s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=d1566308bdbe6365bf235cc496bb2fe5a84ade50f98444222c1cfebac3fac032} value=0.10976205833306571 ], [ var='C' labels={instance=10.100.40.95:10250, name=d1566308bdbe6365bf235cc496bb2fe5a84ade50f98444222c1cfebac3fac032} value=1 ]} {Instance:instance=10.100.40.95:10250, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c Value:0xc023519b90} C:{Var:C Labels:instance=10.100.40.95:10250, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c Value:0xc023519ba8}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:4.570798265s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c} value=1.4304939750005967 ], [ var='C' labels={instance=10.100.40.95:10250, name=de83763f9eca53ca3d03eb0ce3507642a01e0fd49382fefd80cde5457cef4c1c} value=1 ]} {Instance:instance=10.100.40.95:10250, name=e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed Value:0xc023519c00} C:{Var:C Labels:instance=10.100.40.95:10250, name=e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed Value:0xc023519bd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570800727s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=e630c551dd4814bf4c51610112c66ab4ca227c563cba89e8b89a84b91ae915ed} value=1 ]} {Instance:instance=10.100.40.95:10250, name=ebdb3f3cd4cf1e6283aee883210d46b76eda2edec266ad9255b294eddc57bbc9 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.40.95:10250, name=ebdb3f3cd4cf1e6283aee883210d46b76eda2edec266ad9255b294eddc57bbc9 Value:0xc023519c30} C:{Var:C Labels:instance=10.100.40.95:10250, name=ebdb3f3cd4cf1e6283aee883210d46b76eda2edec266ad9255b294eddc57bbc9 Value:0xc023519c48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570803168s EvaluationString:[ var='B' labels={instance=10.100.40.95:10250, name=ebdb3f3cd4cf1e6283aee883210d46b76eda2edec266ad9255b294eddc57bbc9} value=0 ], [ var='C' labels={instance=10.100.40.95:10250, name=ebdb3f3cd4cf1e6283aee883210d46b76eda2edec266ad9255b294eddc57bbc9} value=1 ]} {Instance:instance=10.100.42.89:10250, name=0ebb75b73e284e27e60c81c299733616ad5b5404bbab7de864c50bc4c7fc4c3b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=0ebb75b73e284e27e60c81c299733616ad5b5404bbab7de864c50bc4c7fc4c3b Value:0xc023519c90} C:{Var:C Labels:instance=10.100.42.89:10250, name=0ebb75b73e284e27e60c81c299733616ad5b5404bbab7de864c50bc4c7fc4c3b Value:0xc023519c78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570805394s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=0ebb75b73e284e27e60c81c299733616ad5b5404bbab7de864c50bc4c7fc4c3b} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=0ebb75b73e284e27e60c81c299733616ad5b5404bbab7de864c50bc4c7fc4c3b} value=1 ]} {Instance:instance=10.100.42.89:10250, name=2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8 Value:0xc023519cd8} C:{Var:C Labels:instance=10.100.42.89:10250, name=2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8 Value:0xc023519cc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570807554s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8} value=0.03738825481478771 ], [ var='C' labels={instance=10.100.42.89:10250, name=2104b32e2fe2899551be5cf520513a841712002655f871963adbf917c95b61d8} value=1 ]} {Instance:instance=10.100.42.89:10250, 
name=312acd816f6341289a905b5553bff671acc89b525e6466fe4c5aec577fc012da State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=312acd816f6341289a905b5553bff671acc89b525e6466fe4c5aec577fc012da Value:0xc023519d18} C:{Var:C Labels:instance=10.100.42.89:10250, name=312acd816f6341289a905b5553bff671acc89b525e6466fe4c5aec577fc012da Value:0xc023519d30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570810336s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=312acd816f6341289a905b5553bff671acc89b525e6466fe4c5aec577fc012da} value=0.04360107407407028 ], [ var='C' labels={instance=10.100.42.89:10250, name=312acd816f6341289a905b5553bff671acc89b525e6466fe4c5aec577fc012da} value=1 ]} {Instance:instance=10.100.42.89:10250, name=35dc8af3e073b65ea56f0ab7a4ae88f3321ed929c1b2c5927cc651dbfe6ae92c State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=35dc8af3e073b65ea56f0ab7a4ae88f3321ed929c1b2c5927cc651dbfe6ae92c Value:0xc023519d60} C:{Var:C Labels:instance=10.100.42.89:10250, name=35dc8af3e073b65ea56f0ab7a4ae88f3321ed929c1b2c5927cc651dbfe6ae92c Value:0xc023519d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57081343s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=35dc8af3e073b65ea56f0ab7a4ae88f3321ed929c1b2c5927cc651dbfe6ae92c} value=0.2398626888882265 ], [ var='C' labels={instance=10.100.42.89:10250, name=35dc8af3e073b65ea56f0ab7a4ae88f3321ed929c1b2c5927cc651dbfe6ae92c} value=1 ]} {Instance:instance=10.100.42.89:10250, name=366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1 Value:0xc023519da8} C:{Var:C Labels:instance=10.100.42.89:10250, name=366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1 Value:0xc023519dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570815959s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=366939461e668320aa0b40f7e8d656f488e1787edcc7ae103b175f5477c403e1} value=1 ]} {Instance:instance=10.100.42.89:10250, name=3726b10ab5d6c23a0a8ecfdb940ab72bdece96ee8f073c6ddcd756dd262ba626 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=3726b10ab5d6c23a0a8ecfdb940ab72bdece96ee8f073c6ddcd756dd262ba626 Value:0xc023519df0} C:{Var:C Labels:instance=10.100.42.89:10250, name=3726b10ab5d6c23a0a8ecfdb940ab72bdece96ee8f073c6ddcd756dd262ba626 Value:0xc023519e08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570818076s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=3726b10ab5d6c23a0a8ecfdb940ab72bdece96ee8f073c6ddcd756dd262ba626} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=3726b10ab5d6c23a0a8ecfdb940ab72bdece96ee8f073c6ddcd756dd262ba626} value=1 ]} {Instance:instance=10.100.42.89:10250, name=4394087e58cc295a53619b824fbdec5883e43d2cb938a67a6167963f54cf08b3 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=4394087e58cc295a53619b824fbdec5883e43d2cb938a67a6167963f54cf08b3 Value:0xc023519e48} C:{Var:C Labels:instance=10.100.42.89:10250, name=4394087e58cc295a53619b824fbdec5883e43d2cb938a67a6167963f54cf08b3 Value:0xc023519e60}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:4.570820851s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=4394087e58cc295a53619b824fbdec5883e43d2cb938a67a6167963f54cf08b3} value=0.20053103703739236 ], [ var='C' labels={instance=10.100.42.89:10250, name=4394087e58cc295a53619b824fbdec5883e43d2cb938a67a6167963f54cf08b3} value=1 ]} {Instance:instance=10.100.42.89:10250, name=524d7074b35ba1f365c0173d770267d7a2fa78772e51ab815f96c6990389ce92 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=524d7074b35ba1f365c0173d770267d7a2fa78772e51ab815f96c6990389ce92 Value:0xc023519e90} C:{Var:C Labels:instance=10.100.42.89:10250, name=524d7074b35ba1f365c0173d770267d7a2fa78772e51ab815f96c6990389ce92 Value:0xc023519ea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5708235s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=524d7074b35ba1f365c0173d770267d7a2fa78772e51ab815f96c6990389ce92} value=0.05388682296298881 ], [ var='C' labels={instance=10.100.42.89:10250, name=524d7074b35ba1f365c0173d770267d7a2fa78772e51ab815f96c6990389ce92} value=1 ]} {Instance:instance=10.100.42.89:10250, name=54a9cf3a0e04a560d373931aa367de4a3c5d53b0f62bcfbd22d74f7f908ae87a State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=54a9cf3a0e04a560d373931aa367de4a3c5d53b0f62bcfbd22d74f7f908ae87a Value:0xc023519ed8} C:{Var:C Labels:instance=10.100.42.89:10250, name=54a9cf3a0e04a560d373931aa367de4a3c5d53b0f62bcfbd22d74f7f908ae87a Value:0xc023519ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570826098s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=54a9cf3a0e04a560d373931aa367de4a3c5d53b0f62bcfbd22d74f7f908ae87a} value=0.049554605925887125 ], [ var='C' labels={instance=10.100.42.89:10250, name=54a9cf3a0e04a560d373931aa367de4a3c5d53b0f62bcfbd22d74f7f908ae87a} value=1 ]} {Instance:instance=10.100.42.89:10250, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd Value:0xc023519f20} C:{Var:C Labels:instance=10.100.42.89:10250, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd Value:0xc023519f38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570828476s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=5f833652e08bd89f05822b55f39d8461812bb5b4088d0f9bccb477134bbf12dd} value=1 ]} {Instance:instance=10.100.42.89:10250, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a Value:0xc023519f68} C:{Var:C Labels:instance=10.100.42.89:10250, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a Value:0xc023519f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570830483s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=67223776d2671f12e5b3c49fb7a4ff0e31fcb15d6caec7ec2e4f3a77e101021a} value=1 ]} {Instance:instance=10.100.42.89:10250, 
name=7e8596ba69f68130f501eca29c65fa9e0aa1aee313f34575520e657c45637415 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=7e8596ba69f68130f501eca29c65fa9e0aa1aee313f34575520e657c45637415 Value:0xc023519fb0} C:{Var:C Labels:instance=10.100.42.89:10250, name=7e8596ba69f68130f501eca29c65fa9e0aa1aee313f34575520e657c45637415 Value:0xc023519fc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5708332s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=7e8596ba69f68130f501eca29c65fa9e0aa1aee313f34575520e657c45637415} value=0.053876431925935456 ], [ var='C' labels={instance=10.100.42.89:10250, name=7e8596ba69f68130f501eca29c65fa9e0aa1aee313f34575520e657c45637415} value=1 ]} {Instance:instance=10.100.42.89:10250, name=916696a205a15297a155722c10c3adae03e2cf32f3c54e31401f1467a44bb06e State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=916696a205a15297a155722c10c3adae03e2cf32f3c54e31401f1467a44bb06e Value:0xc023519ff8} C:{Var:C Labels:instance=10.100.42.89:10250, name=916696a205a15297a155722c10c3adae03e2cf32f3c54e31401f1467a44bb06e Value:0xc0026a8150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570836023s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=916696a205a15297a155722c10c3adae03e2cf32f3c54e31401f1467a44bb06e} value=0.003209013333326362 ], [ var='C' labels={instance=10.100.42.89:10250, name=916696a205a15297a155722c10c3adae03e2cf32f3c54e31401f1467a44bb06e} value=1 ]} {Instance:instance=10.100.42.89:10250, name=9367ebbf6a3a56c37f1b1e019b791f668784cd8b9e40bd670635e02fe9d26541 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=9367ebbf6a3a56c37f1b1e019b791f668784cd8b9e40bd670635e02fe9d26541 Value:0xc0026a8190} C:{Var:C Labels:instance=10.100.42.89:10250, name=9367ebbf6a3a56c37f1b1e019b791f668784cd8b9e40bd670635e02fe9d26541 Value:0xc0026a81a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570838718s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=9367ebbf6a3a56c37f1b1e019b791f668784cd8b9e40bd670635e02fe9d26541} value=0.1319903777780214 ], [ var='C' labels={instance=10.100.42.89:10250, name=9367ebbf6a3a56c37f1b1e019b791f668784cd8b9e40bd670635e02fe9d26541} value=1 ]} {Instance:instance=10.100.42.89:10250, name=959efaac1ff1525e141d794b4cbac1ac760ce635056036fb15958e426d58c406 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=959efaac1ff1525e141d794b4cbac1ac760ce635056036fb15958e426d58c406 Value:0xc0026a81d8} C:{Var:C Labels:instance=10.100.42.89:10250, name=959efaac1ff1525e141d794b4cbac1ac760ce635056036fb15958e426d58c406 Value:0xc0026a81f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570841273s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=959efaac1ff1525e141d794b4cbac1ac760ce635056036fb15958e426d58c406} value=0.07391014814789675 ], [ var='C' labels={instance=10.100.42.89:10250, name=959efaac1ff1525e141d794b4cbac1ac760ce635056036fb15958e426d58c406} value=1 ]} {Instance:instance=10.100.42.89:10250, name=96a61430389b7ff493846bc8cd6666d01072c1ca5162bbdd1f4bb809b3a3037f State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=96a61430389b7ff493846bc8cd6666d01072c1ca5162bbdd1f4bb809b3a3037f Value:0xc0026a8220} C:{Var:C Labels:instance=10.100.42.89:10250, name=96a61430389b7ff493846bc8cd6666d01072c1ca5162bbdd1f4bb809b3a3037f 
Value:0xc0026a8238}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570843681s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=96a61430389b7ff493846bc8cd6666d01072c1ca5162bbdd1f4bb809b3a3037f} value=0.2654830406667046 ], [ var='C' labels={instance=10.100.42.89:10250, name=96a61430389b7ff493846bc8cd6666d01072c1ca5162bbdd1f4bb809b3a3037f} value=1 ]} {Instance:instance=10.100.42.89:10250, name=9c1c3a35ff68f5e328ebc877580b2c615dcc8863893cf04c921a4f729e3bb5a8 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=9c1c3a35ff68f5e328ebc877580b2c615dcc8863893cf04c921a4f729e3bb5a8 Value:0xc0026a8268} C:{Var:C Labels:instance=10.100.42.89:10250, name=9c1c3a35ff68f5e328ebc877580b2c615dcc8863893cf04c921a4f729e3bb5a8 Value:0xc0026a82a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570846041s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=9c1c3a35ff68f5e328ebc877580b2c615dcc8863893cf04c921a4f729e3bb5a8} value=0.2196107756295802 ], [ var='C' labels={instance=10.100.42.89:10250, name=9c1c3a35ff68f5e328ebc877580b2c615dcc8863893cf04c921a4f729e3bb5a8} value=1 ]} {Instance:instance=10.100.42.89:10250, name=b84d571e56b2e5abac8fe8519549a997076578e5443b7cf10567ecc3805b0a90 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=b84d571e56b2e5abac8fe8519549a997076578e5443b7cf10567ecc3805b0a90 Value:0xc0026a82d0} C:{Var:C Labels:instance=10.100.42.89:10250, name=b84d571e56b2e5abac8fe8519549a997076578e5443b7cf10567ecc3805b0a90 Value:0xc0026a82e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570849133s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=b84d571e56b2e5abac8fe8519549a997076578e5443b7cf10567ecc3805b0a90} value=0.11399917777789932 ], [ var='C' labels={instance=10.100.42.89:10250, name=b84d571e56b2e5abac8fe8519549a997076578e5443b7cf10567ecc3805b0a90} value=1 ]} {Instance:instance=10.100.42.89:10250, name=c13261778746223baa4479a0b45624f353c1bf0131bd9b3be71c3a34f8322a3f State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=c13261778746223baa4479a0b45624f353c1bf0131bd9b3be71c3a34f8322a3f Value:0xc0026a8350} C:{Var:C Labels:instance=10.100.42.89:10250, name=c13261778746223baa4479a0b45624f353c1bf0131bd9b3be71c3a34f8322a3f Value:0xc0026a8318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570851741s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=c13261778746223baa4479a0b45624f353c1bf0131bd9b3be71c3a34f8322a3f} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=c13261778746223baa4479a0b45624f353c1bf0131bd9b3be71c3a34f8322a3f} value=1 ]} {Instance:instance=10.100.42.89:10250, name=c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85 Value:0xc0026a8380} C:{Var:C Labels:instance=10.100.42.89:10250, name=c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85 Value:0xc0026a8398}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570854374s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85} value=0.05207670229639108 ], [ var='C' labels={instance=10.100.42.89:10250, name=c276a8b46e21234e337819cacd4708d250f3c41983dcf93b0c126a3973be3c85} value=1 ]} 
{Instance:instance=10.100.42.89:10250, name=cba33cd9c9d8c68a55660523a05db9ab879d7c062132032db51fa4d1fcb70c5b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=cba33cd9c9d8c68a55660523a05db9ab879d7c062132032db51fa4d1fcb70c5b Value:0xc0026a83c8} C:{Var:C Labels:instance=10.100.42.89:10250, name=cba33cd9c9d8c68a55660523a05db9ab879d7c062132032db51fa4d1fcb70c5b Value:0xc0026a83e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570856634s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=cba33cd9c9d8c68a55660523a05db9ab879d7c062132032db51fa4d1fcb70c5b} value=0.05123649777774348 ], [ var='C' labels={instance=10.100.42.89:10250, name=cba33cd9c9d8c68a55660523a05db9ab879d7c062132032db51fa4d1fcb70c5b} value=1 ]} {Instance:instance=10.100.42.89:10250, name=d28dbf9b200cddb284fd5b577b9ff8c530ac35b3904ef0e66d342dccf53316bc State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=d28dbf9b200cddb284fd5b577b9ff8c530ac35b3904ef0e66d342dccf53316bc Value:0xc0026a8410} C:{Var:C Labels:instance=10.100.42.89:10250, name=d28dbf9b200cddb284fd5b577b9ff8c530ac35b3904ef0e66d342dccf53316bc Value:0xc0026a8428}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570859098s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=d28dbf9b200cddb284fd5b577b9ff8c530ac35b3904ef0e66d342dccf53316bc} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=d28dbf9b200cddb284fd5b577b9ff8c530ac35b3904ef0e66d342dccf53316bc} value=1 ]} {Instance:instance=10.100.42.89:10250, name=d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e Value:0xc0026a8458} C:{Var:C Labels:instance=10.100.42.89:10250, name=d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e Value:0xc0026a8470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570861707s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e} value=0.18545825926088985 ], [ var='C' labels={instance=10.100.42.89:10250, name=d417abaa76448f67eede40e3da1c9a935935078e6e7671a29121b92e2f81745e} value=1 ]} {Instance:instance=10.100.42.89:10250, name=d9d3bae47d0d6416871efecfd66aa49dc77f7ffa77d3bf55f0b4fbe9dc427537 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=d9d3bae47d0d6416871efecfd66aa49dc77f7ffa77d3bf55f0b4fbe9dc427537 Value:0xc0026a84a0} C:{Var:C Labels:instance=10.100.42.89:10250, name=d9d3bae47d0d6416871efecfd66aa49dc77f7ffa77d3bf55f0b4fbe9dc427537 Value:0xc0026a84b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570863955s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=d9d3bae47d0d6416871efecfd66aa49dc77f7ffa77d3bf55f0b4fbe9dc427537} value=0.025020922963045044 ], [ var='C' labels={instance=10.100.42.89:10250, name=d9d3bae47d0d6416871efecfd66aa49dc77f7ffa77d3bf55f0b4fbe9dc427537} value=1 ]} {Instance:instance=10.100.42.89:10250, name=dd1036366297ef0152543b05b2ae9f00dd1de9daff4f8e8bbdf60f3d2e4a3a99 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=dd1036366297ef0152543b05b2ae9f00dd1de9daff4f8e8bbdf60f3d2e4a3a99 Value:0xc0026a84e8} C:{Var:C Labels:instance=10.100.42.89:10250, 
name=dd1036366297ef0152543b05b2ae9f00dd1de9daff4f8e8bbdf60f3d2e4a3a99 Value:0xc0026a8500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570866181s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=dd1036366297ef0152543b05b2ae9f00dd1de9daff4f8e8bbdf60f3d2e4a3a99} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, name=dd1036366297ef0152543b05b2ae9f00dd1de9daff4f8e8bbdf60f3d2e4a3a99} value=1 ]} {Instance:instance=10.100.42.89:10250, name=e719fc8861bc3a3e1dc72928e299852e9177e406e3c964ca944683e38247f214 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=e719fc8861bc3a3e1dc72928e299852e9177e406e3c964ca944683e38247f214 Value:0xc0026a8530} C:{Var:C Labels:instance=10.100.42.89:10250, name=e719fc8861bc3a3e1dc72928e299852e9177e406e3c964ca944683e38247f214 Value:0xc0026a8548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5708683s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=e719fc8861bc3a3e1dc72928e299852e9177e406e3c964ca944683e38247f214} value=0.15701090000018464 ], [ var='C' labels={instance=10.100.42.89:10250, name=e719fc8861bc3a3e1dc72928e299852e9177e406e3c964ca944683e38247f214} value=1 ]} {Instance:instance=10.100.42.89:10250, name=ea900f2cec1f9c0c6eb45e75aaaa12ae2bc5b8df2c343e48e5f5daad5dbf3d78 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=ea900f2cec1f9c0c6eb45e75aaaa12ae2bc5b8df2c343e48e5f5daad5dbf3d78 Value:0xc0026a8578} C:{Var:C Labels:instance=10.100.42.89:10250, name=ea900f2cec1f9c0c6eb45e75aaaa12ae2bc5b8df2c343e48e5f5daad5dbf3d78 Value:0xc0026a8590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570871458s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=ea900f2cec1f9c0c6eb45e75aaaa12ae2bc5b8df2c343e48e5f5daad5dbf3d78} value=0.08973782963005726 ], [ var='C' labels={instance=10.100.42.89:10250, name=ea900f2cec1f9c0c6eb45e75aaaa12ae2bc5b8df2c343e48e5f5daad5dbf3d78} value=1 ]} {Instance:instance=10.100.42.89:10250, name=ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07 Value:0xc0026a85c0} C:{Var:C Labels:instance=10.100.42.89:10250, name=ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07 Value:0xc0026a85d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570873758s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07} value=0.06937223851852853 ], [ var='C' labels={instance=10.100.42.89:10250, name=ec2a8063686e138f5a2e63e887bdda770cdcd99aa8031a6572798a53e2b99b07} value=1 ]} {Instance:instance=10.100.42.89:10250, name=f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.42.89:10250, name=f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735 Value:0xc0026a8630} C:{Var:C Labels:instance=10.100.42.89:10250, name=f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735 Value:0xc0026a8608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570877342s EvaluationString:[ var='B' labels={instance=10.100.42.89:10250, name=f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735} value=0 ], [ var='C' labels={instance=10.100.42.89:10250, 
name=f97d95aa2543b25b2850cb8616616e326c6899de0edd010b692ec61780b63735} value=1 ]} {Instance:instance=10.100.46.153:10250, name=075a9d15ceeb990516185ead6ac7bdc2ccb807bdc25fbff51bb5ed72d5ab1a0d State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=075a9d15ceeb990516185ead6ac7bdc2ccb807bdc25fbff51bb5ed72d5ab1a0d Value:0xc0026a8660} C:{Var:C Labels:instance=10.100.46.153:10250, name=075a9d15ceeb990516185ead6ac7bdc2ccb807bdc25fbff51bb5ed72d5ab1a0d Value:0xc0026a8678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57087967s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=075a9d15ceeb990516185ead6ac7bdc2ccb807bdc25fbff51bb5ed72d5ab1a0d} value=0.18104595833392523 ], [ var='C' labels={instance=10.100.46.153:10250, name=075a9d15ceeb990516185ead6ac7bdc2ccb807bdc25fbff51bb5ed72d5ab1a0d} value=1 ]} {Instance:instance=10.100.46.153:10250, name=0ba34eacd0359df03ea328ead274200700d9e7cd062f7e7c3ab158a3632afaf7 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=0ba34eacd0359df03ea328ead274200700d9e7cd062f7e7c3ab158a3632afaf7 Value:0xc0026a86a8} C:{Var:C Labels:instance=10.100.46.153:10250, name=0ba34eacd0359df03ea328ead274200700d9e7cd062f7e7c3ab158a3632afaf7 Value:0xc0026a86c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57088198s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=0ba34eacd0359df03ea328ead274200700d9e7cd062f7e7c3ab158a3632afaf7} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=0ba34eacd0359df03ea328ead274200700d9e7cd062f7e7c3ab158a3632afaf7} value=1 ]} {Instance:instance=10.100.46.153:10250, name=11a6e24b4e74c7a9ce9e13c12ffba8c63b6104bc739653733f845d6bbc3dbc34 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=11a6e24b4e74c7a9ce9e13c12ffba8c63b6104bc739653733f845d6bbc3dbc34 Value:0xc0026a86f0} C:{Var:C Labels:instance=10.100.46.153:10250, name=11a6e24b4e74c7a9ce9e13c12ffba8c63b6104bc739653733f845d6bbc3dbc34 Value:0xc0026a8708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570884308s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=11a6e24b4e74c7a9ce9e13c12ffba8c63b6104bc739653733f845d6bbc3dbc34} value=0.007938493333341512 ], [ var='C' labels={instance=10.100.46.153:10250, name=11a6e24b4e74c7a9ce9e13c12ffba8c63b6104bc739653733f845d6bbc3dbc34} value=1 ]} {Instance:instance=10.100.46.153:10250, name=28ad5ef5decaaec1df3908b730ef255fe0674da206dc2417f87c8349731b726e State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=28ad5ef5decaaec1df3908b730ef255fe0674da206dc2417f87c8349731b726e Value:0xc0026a8748} C:{Var:C Labels:instance=10.100.46.153:10250, name=28ad5ef5decaaec1df3908b730ef255fe0674da206dc2417f87c8349731b726e Value:0xc0026a8760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570886766s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=28ad5ef5decaaec1df3908b730ef255fe0674da206dc2417f87c8349731b726e} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=28ad5ef5decaaec1df3908b730ef255fe0674da206dc2417f87c8349731b726e} value=1 ]} {Instance:instance=10.100.46.153:10250, name=29c30b4fd12104130950110fb4485dacbeebc91f64db102d2391b1fbf69e92d9 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=29c30b4fd12104130950110fb4485dacbeebc91f64db102d2391b1fbf69e92d9 Value:0xc0026a8790} C:{Var:C 
Labels:instance=10.100.46.153:10250, name=29c30b4fd12104130950110fb4485dacbeebc91f64db102d2391b1fbf69e92d9 Value:0xc0026a87a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57088911s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=29c30b4fd12104130950110fb4485dacbeebc91f64db102d2391b1fbf69e92d9} value=0.049588242416689354 ], [ var='C' labels={instance=10.100.46.153:10250, name=29c30b4fd12104130950110fb4485dacbeebc91f64db102d2391b1fbf69e92d9} value=1 ]} {Instance:instance=10.100.46.153:10250, name=2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f Value:0xc0026a87d8} C:{Var:C Labels:instance=10.100.46.153:10250, name=2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f Value:0xc0026a87f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570891406s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f} value=0.03002313333335375 ], [ var='C' labels={instance=10.100.46.153:10250, name=2e53a471ece668f1e293fa3d3b2da67be4ae1e2fa5073ed66d40828010c00f9f} value=1 ]} {Instance:instance=10.100.46.153:10250, name=2fac00b9c4fa698ca8ef303950173fab3d89fd5dc780b54a3de1cfacea8f71a9 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=2fac00b9c4fa698ca8ef303950173fab3d89fd5dc780b54a3de1cfacea8f71a9 Value:0xc0026a8838} C:{Var:C Labels:instance=10.100.46.153:10250, name=2fac00b9c4fa698ca8ef303950173fab3d89fd5dc780b54a3de1cfacea8f71a9 Value:0xc0026a8820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570894171s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=2fac00b9c4fa698ca8ef303950173fab3d89fd5dc780b54a3de1cfacea8f71a9} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=2fac00b9c4fa698ca8ef303950173fab3d89fd5dc780b54a3de1cfacea8f71a9} value=1 ]} {Instance:instance=10.100.46.153:10250, name=35264817cdce430e6f8911f6a40600aaaedc06aeca53ea5993861c68c7e9d9bc State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=35264817cdce430e6f8911f6a40600aaaedc06aeca53ea5993861c68c7e9d9bc Value:0xc0026a8868} C:{Var:C Labels:instance=10.100.46.153:10250, name=35264817cdce430e6f8911f6a40600aaaedc06aeca53ea5993861c68c7e9d9bc Value:0xc0026a8880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570896661s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=35264817cdce430e6f8911f6a40600aaaedc06aeca53ea5993861c68c7e9d9bc} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=35264817cdce430e6f8911f6a40600aaaedc06aeca53ea5993861c68c7e9d9bc} value=1 ]} {Instance:instance=10.100.46.153:10250, name=36ed607d7628f9ff2ee386ef755ac3157096c258658fb3b066b0cd94eec0718e State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=36ed607d7628f9ff2ee386ef755ac3157096c258658fb3b066b0cd94eec0718e Value:0xc0026a88c8} C:{Var:C Labels:instance=10.100.46.153:10250, name=36ed607d7628f9ff2ee386ef755ac3157096c258658fb3b066b0cd94eec0718e Value:0xc0026a88b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570898974s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=36ed607d7628f9ff2ee386ef755ac3157096c258658fb3b066b0cd94eec0718e} value=0.01162798750000027 ], [ var='C' 
labels={instance=10.100.46.153:10250, name=36ed607d7628f9ff2ee386ef755ac3157096c258658fb3b066b0cd94eec0718e} value=1 ]} {Instance:instance=10.100.46.153:10250, name=37615861c38664709799e23c630528592fe4cf3a8a378fcdba392dbf64bb96d0 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=37615861c38664709799e23c630528592fe4cf3a8a378fcdba392dbf64bb96d0 Value:0xc0026a8920} C:{Var:C Labels:instance=10.100.46.153:10250, name=37615861c38664709799e23c630528592fe4cf3a8a378fcdba392dbf64bb96d0 Value:0xc0026a8908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570910198s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=37615861c38664709799e23c630528592fe4cf3a8a378fcdba392dbf64bb96d0} value=0.047091838416690734 ], [ var='C' labels={instance=10.100.46.153:10250, name=37615861c38664709799e23c630528592fe4cf3a8a378fcdba392dbf64bb96d0} value=1 ]} {Instance:instance=10.100.46.153:10250, name=400cb893bbf0fbee0a4cc786c4b239ddd5ee24419e4a928ba37bfe66e325c229 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=400cb893bbf0fbee0a4cc786c4b239ddd5ee24419e4a928ba37bfe66e325c229 Value:0xc0026a8950} C:{Var:C Labels:instance=10.100.46.153:10250, name=400cb893bbf0fbee0a4cc786c4b239ddd5ee24419e4a928ba37bfe66e325c229 Value:0xc0026a8968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570913776s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=400cb893bbf0fbee0a4cc786c4b239ddd5ee24419e4a928ba37bfe66e325c229} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=400cb893bbf0fbee0a4cc786c4b239ddd5ee24419e4a928ba37bfe66e325c229} value=1 ]} {Instance:instance=10.100.46.153:10250, name=41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2 Value:0xc0026a89b0} C:{Var:C Labels:instance=10.100.46.153:10250, name=41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2 Value:0xc0026a8998}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570917444s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=41733c5ab7d4dbe9441b5a52fa686b912111eaf0a87161606741d26888539ae2} value=1 ]} {Instance:instance=10.100.46.153:10250, name=41f9a237fc7429d9ccefe217d1afdbdd7e7dc553dea6caf8822ac9497d113e66 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=41f9a237fc7429d9ccefe217d1afdbdd7e7dc553dea6caf8822ac9497d113e66 Value:0xc0026a8a08} C:{Var:C Labels:instance=10.100.46.153:10250, name=41f9a237fc7429d9ccefe217d1afdbdd7e7dc553dea6caf8822ac9497d113e66 Value:0xc0026a89f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570920415s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=41f9a237fc7429d9ccefe217d1afdbdd7e7dc553dea6caf8822ac9497d113e66} value=0.019198895833293744 ], [ var='C' labels={instance=10.100.46.153:10250, name=41f9a237fc7429d9ccefe217d1afdbdd7e7dc553dea6caf8822ac9497d113e66} value=1 ]} {Instance:instance=10.100.46.153:10250, name=470a47a685704d45ea168da45bb26e33b619ae064050f44fe33eea7621a4e711 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=470a47a685704d45ea168da45bb26e33b619ae064050f44fe33eea7621a4e711 
Value:0xc0026a8a78} C:{Var:C Labels:instance=10.100.46.153:10250, name=470a47a685704d45ea168da45bb26e33b619ae064050f44fe33eea7621a4e711 Value:0xc0026a8ae0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570923901s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=470a47a685704d45ea168da45bb26e33b619ae064050f44fe33eea7621a4e711} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=470a47a685704d45ea168da45bb26e33b619ae064050f44fe33eea7621a4e711} value=1 ]} {Instance:instance=10.100.46.153:10250, name=52e1e128cc93b7b9de53d7c8568cc3403f59fcb8fb3d5388a42bd7fcdc5464d1 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=52e1e128cc93b7b9de53d7c8568cc3403f59fcb8fb3d5388a42bd7fcdc5464d1 Value:0xc0026a8b70} C:{Var:C Labels:instance=10.100.46.153:10250, name=52e1e128cc93b7b9de53d7c8568cc3403f59fcb8fb3d5388a42bd7fcdc5464d1 Value:0xc0026a8b88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570926368s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=52e1e128cc93b7b9de53d7c8568cc3403f59fcb8fb3d5388a42bd7fcdc5464d1} value=0.06777649583331898 ], [ var='C' labels={instance=10.100.46.153:10250, name=52e1e128cc93b7b9de53d7c8568cc3403f59fcb8fb3d5388a42bd7fcdc5464d1} value=1 ]} {Instance:instance=10.100.46.153:10250, name=591484140bd0b5db3ac54c1dbc78292de292825a56849063027ac062e9e2a326 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=591484140bd0b5db3ac54c1dbc78292de292825a56849063027ac062e9e2a326 Value:0xc0026a8c18} C:{Var:C Labels:instance=10.100.46.153:10250, name=591484140bd0b5db3ac54c1dbc78292de292825a56849063027ac062e9e2a326 Value:0xc0026a8ca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570928941s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=591484140bd0b5db3ac54c1dbc78292de292825a56849063027ac062e9e2a326} value=0.010409140833333256 ], [ var='C' labels={instance=10.100.46.153:10250, name=591484140bd0b5db3ac54c1dbc78292de292825a56849063027ac062e9e2a326} value=1 ]} {Instance:instance=10.100.46.153:10250, name=5a9045ff7cdb0236e450886df64d37f56574b5343645f66d90b32a5ded026a15 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=5a9045ff7cdb0236e450886df64d37f56574b5343645f66d90b32a5ded026a15 Value:0xc0026a8d40} C:{Var:C Labels:instance=10.100.46.153:10250, name=5a9045ff7cdb0236e450886df64d37f56574b5343645f66d90b32a5ded026a15 Value:0xc0026a8d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570931274s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=5a9045ff7cdb0236e450886df64d37f56574b5343645f66d90b32a5ded026a15} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=5a9045ff7cdb0236e450886df64d37f56574b5343645f66d90b32a5ded026a15} value=1 ]} {Instance:instance=10.100.46.153:10250, name=6b1247322aa769e5e8bc2bba8e03f524dd9c0e39c94c25de550d0109ef8d55d3 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=6b1247322aa769e5e8bc2bba8e03f524dd9c0e39c94c25de550d0109ef8d55d3 Value:0xc0026a8e28} C:{Var:C Labels:instance=10.100.46.153:10250, name=6b1247322aa769e5e8bc2bba8e03f524dd9c0e39c94c25de550d0109ef8d55d3 Value:0xc0026a8e80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570933535s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=6b1247322aa769e5e8bc2bba8e03f524dd9c0e39c94c25de550d0109ef8d55d3} value=0.07198553666664036 
], [ var='C' labels={instance=10.100.46.153:10250, name=6b1247322aa769e5e8bc2bba8e03f524dd9c0e39c94c25de550d0109ef8d55d3} value=1 ]} {Instance:instance=10.100.46.153:10250, name=6da03d1f9428ebfbbfd6c37dc2d0b51cbc510da7b59ca4b1ea1b72c28cb2bae2 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=6da03d1f9428ebfbbfd6c37dc2d0b51cbc510da7b59ca4b1ea1b72c28cb2bae2 Value:0xc0026a8f10} C:{Var:C Labels:instance=10.100.46.153:10250, name=6da03d1f9428ebfbbfd6c37dc2d0b51cbc510da7b59ca4b1ea1b72c28cb2bae2 Value:0xc0026a8f48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570935838s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=6da03d1f9428ebfbbfd6c37dc2d0b51cbc510da7b59ca4b1ea1b72c28cb2bae2} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=6da03d1f9428ebfbbfd6c37dc2d0b51cbc510da7b59ca4b1ea1b72c28cb2bae2} value=1 ]} {Instance:instance=10.100.46.153:10250, name=6db50a7032c08669383855581827764516b4701891ec4719d4f2d69268bec591 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=6db50a7032c08669383855581827764516b4701891ec4719d4f2d69268bec591 Value:0xc0026a8fe8} C:{Var:C Labels:instance=10.100.46.153:10250, name=6db50a7032c08669383855581827764516b4701891ec4719d4f2d69268bec591 Value:0xc0026a9050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57093898s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=6db50a7032c08669383855581827764516b4701891ec4719d4f2d69268bec591} value=0.34085459166666016 ], [ var='C' labels={instance=10.100.46.153:10250, name=6db50a7032c08669383855581827764516b4701891ec4719d4f2d69268bec591} value=1 ]} {Instance:instance=10.100.46.153:10250, name=745d15001863e6f94f2a72558aa94b511e0ee347d39fb400cc30918487b380f5 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=745d15001863e6f94f2a72558aa94b511e0ee347d39fb400cc30918487b380f5 Value:0xc0026a9118} C:{Var:C Labels:instance=10.100.46.153:10250, name=745d15001863e6f94f2a72558aa94b511e0ee347d39fb400cc30918487b380f5 Value:0xc0026a90e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570942824s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=745d15001863e6f94f2a72558aa94b511e0ee347d39fb400cc30918487b380f5} value=0.016945936666665773 ], [ var='C' labels={instance=10.100.46.153:10250, name=745d15001863e6f94f2a72558aa94b511e0ee347d39fb400cc30918487b380f5} value=1 ]} {Instance:instance=10.100.46.153:10250, name=7955a19c73ac07f0f686427fe0c5d04521b7d60ec24388bb429323027746c9cf State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=7955a19c73ac07f0f686427fe0c5d04521b7d60ec24388bb429323027746c9cf Value:0xc0026a91a8} C:{Var:C Labels:instance=10.100.46.153:10250, name=7955a19c73ac07f0f686427fe0c5d04521b7d60ec24388bb429323027746c9cf Value:0xc0026a91e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57094706s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=7955a19c73ac07f0f686427fe0c5d04521b7d60ec24388bb429323027746c9cf} value=0.03785136666673831 ], [ var='C' labels={instance=10.100.46.153:10250, name=7955a19c73ac07f0f686427fe0c5d04521b7d60ec24388bb429323027746c9cf} value=1 ]} {Instance:instance=10.100.46.153:10250, name=79d43eac6a3ac9d72dfacf067e130486a4256ceb4219719bb769f6294726c82d State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, 
name=79d43eac6a3ac9d72dfacf067e130486a4256ceb4219719bb769f6294726c82d Value:0xc0026a9280} C:{Var:C Labels:instance=10.100.46.153:10250, name=79d43eac6a3ac9d72dfacf067e130486a4256ceb4219719bb769f6294726c82d Value:0xc0026a92b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570949956s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=79d43eac6a3ac9d72dfacf067e130486a4256ceb4219719bb769f6294726c82d} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=79d43eac6a3ac9d72dfacf067e130486a4256ceb4219719bb769f6294726c82d} value=1 ]} {Instance:instance=10.100.46.153:10250, name=7e99157d8039c55c0ed1d45692b8a3dcf94209c284d963c71cbb42de1f494d94 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=7e99157d8039c55c0ed1d45692b8a3dcf94209c284d963c71cbb42de1f494d94 Value:0xc0026a93a8} C:{Var:C Labels:instance=10.100.46.153:10250, name=7e99157d8039c55c0ed1d45692b8a3dcf94209c284d963c71cbb42de1f494d94 Value:0xc0026a93e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570952264s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=7e99157d8039c55c0ed1d45692b8a3dcf94209c284d963c71cbb42de1f494d94} value=0.011088958333333268 ], [ var='C' labels={instance=10.100.46.153:10250, name=7e99157d8039c55c0ed1d45692b8a3dcf94209c284d963c71cbb42de1f494d94} value=1 ]} {Instance:instance=10.100.46.153:10250, name=81a6e9ecb30fd98aad6fe2291b1804d1db24aa0292469ef0e364783909f7654b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=81a6e9ecb30fd98aad6fe2291b1804d1db24aa0292469ef0e364783909f7654b Value:0xc0026a9450} C:{Var:C Labels:instance=10.100.46.153:10250, name=81a6e9ecb30fd98aad6fe2291b1804d1db24aa0292469ef0e364783909f7654b Value:0xc0026a9468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570954916s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=81a6e9ecb30fd98aad6fe2291b1804d1db24aa0292469ef0e364783909f7654b} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=81a6e9ecb30fd98aad6fe2291b1804d1db24aa0292469ef0e364783909f7654b} value=1 ]} {Instance:instance=10.100.46.153:10250, name=843687acc24db76dee456afd8a2cc1426d5355fa6290d7948b2bdc6aa4a11653 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=843687acc24db76dee456afd8a2cc1426d5355fa6290d7948b2bdc6aa4a11653 Value:0xc0026a94b8} C:{Var:C Labels:instance=10.100.46.153:10250, name=843687acc24db76dee456afd8a2cc1426d5355fa6290d7948b2bdc6aa4a11653 Value:0xc0026a9510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570957691s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=843687acc24db76dee456afd8a2cc1426d5355fa6290d7948b2bdc6aa4a11653} value=0.45311441666702496 ], [ var='C' labels={instance=10.100.46.153:10250, name=843687acc24db76dee456afd8a2cc1426d5355fa6290d7948b2bdc6aa4a11653} value=1 ]} {Instance:instance=10.100.46.153:10250, name=85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874 Value:0xc0026a95f0} C:{Var:C Labels:instance=10.100.46.153:10250, name=85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874 Value:0xc0026a9608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570960496s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, 
name=85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874} value=0.12700824166662036 ], [ var='C' labels={instance=10.100.46.153:10250, name=85d73dd2b67dfc640230b8c95c3a05008e4aac8b5929f4cc5365663cf6674874} value=1 ]} {Instance:instance=10.100.46.153:10250, name=99e885e539a2b2f86355c85880fb4a837167d0a22cc4460a87e11ee9900e1707 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=99e885e539a2b2f86355c85880fb4a837167d0a22cc4460a87e11ee9900e1707 Value:0xc0026a9698} C:{Var:C Labels:instance=10.100.46.153:10250, name=99e885e539a2b2f86355c85880fb4a837167d0a22cc4460a87e11ee9900e1707 Value:0xc0026a96e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570963431s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=99e885e539a2b2f86355c85880fb4a837167d0a22cc4460a87e11ee9900e1707} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=99e885e539a2b2f86355c85880fb4a837167d0a22cc4460a87e11ee9900e1707} value=1 ]} {Instance:instance=10.100.46.153:10250, name=9be1ba92e6e534b0323897e885ba81a189a78b2fffb3010993dca4aa2e62fe64 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=9be1ba92e6e534b0323897e885ba81a189a78b2fffb3010993dca4aa2e62fe64 Value:0xc0026a9750} C:{Var:C Labels:instance=10.100.46.153:10250, name=9be1ba92e6e534b0323897e885ba81a189a78b2fffb3010993dca4aa2e62fe64 Value:0xc0026a9778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570967368s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=9be1ba92e6e534b0323897e885ba81a189a78b2fffb3010993dca4aa2e62fe64} value=0.22574824916659963 ], [ var='C' labels={instance=10.100.46.153:10250, name=9be1ba92e6e534b0323897e885ba81a189a78b2fffb3010993dca4aa2e62fe64} value=1 ]} {Instance:instance=10.100.46.153:10250, name=a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f Value:0xc0026a97f8} C:{Var:C Labels:instance=10.100.46.153:10250, name=a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f Value:0xc0026a9850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570971299s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f} value=0.053365359166643124 ], [ var='C' labels={instance=10.100.46.153:10250, name=a2f9cdce800a241d9c977a492fb8afd063048f8539aeeee59b56abf75386f53f} value=1 ]} {Instance:instance=10.100.46.153:10250, name=a93702e0654fc1a9c684a8a557a72456f34f8cf91c695210acfe601ed8f9701c State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=a93702e0654fc1a9c684a8a557a72456f34f8cf91c695210acfe601ed8f9701c Value:0xc0026a9900} C:{Var:C Labels:instance=10.100.46.153:10250, name=a93702e0654fc1a9c684a8a557a72456f34f8cf91c695210acfe601ed8f9701c Value:0xc0026a9918}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570975533s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=a93702e0654fc1a9c684a8a557a72456f34f8cf91c695210acfe601ed8f9701c} value=0.35945145833314507 ], [ var='C' labels={instance=10.100.46.153:10250, name=a93702e0654fc1a9c684a8a557a72456f34f8cf91c695210acfe601ed8f9701c} value=1 ]} {Instance:instance=10.100.46.153:10250, name=af4cc1bb2f4b732e2e6be899b6dd95945d392914566c145f2aaa9d7c6083c387 State:Alerting Error: Results:map[] 
Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=af4cc1bb2f4b732e2e6be899b6dd95945d392914566c145f2aaa9d7c6083c387 Value:0xc0026a99f0} C:{Var:C Labels:instance=10.100.46.153:10250, name=af4cc1bb2f4b732e2e6be899b6dd95945d392914566c145f2aaa9d7c6083c387 Value:0xc0026a99b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570980444s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=af4cc1bb2f4b732e2e6be899b6dd95945d392914566c145f2aaa9d7c6083c387} value=0.013134165000000902 ], [ var='C' labels={instance=10.100.46.153:10250, name=af4cc1bb2f4b732e2e6be899b6dd95945d392914566c145f2aaa9d7c6083c387} value=1 ]} {Instance:instance=10.100.46.153:10250, name=af92867defada9c0329baa042636c1058b8c6d4af669bfb0344e2e468a94b224 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=af92867defada9c0329baa042636c1058b8c6d4af669bfb0344e2e468a94b224 Value:0xc0026a9ab0} C:{Var:C Labels:instance=10.100.46.153:10250, name=af92867defada9c0329baa042636c1058b8c6d4af669bfb0344e2e468a94b224 Value:0xc0026a9ae8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570984941s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=af92867defada9c0329baa042636c1058b8c6d4af669bfb0344e2e468a94b224} value=0.39954373583327657 ], [ var='C' labels={instance=10.100.46.153:10250, name=af92867defada9c0329baa042636c1058b8c6d4af669bfb0344e2e468a94b224} value=1 ]} {Instance:instance=10.100.46.153:10250, name=b8a0f1fe5ec6edb6b6a5d6779a25883130dbffba9b4c0d0fe656bf809b701729 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=b8a0f1fe5ec6edb6b6a5d6779a25883130dbffba9b4c0d0fe656bf809b701729 Value:0xc0026a9b70} C:{Var:C Labels:instance=10.100.46.153:10250, name=b8a0f1fe5ec6edb6b6a5d6779a25883130dbffba9b4c0d0fe656bf809b701729 Value:0xc0026a9b48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570989086s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=b8a0f1fe5ec6edb6b6a5d6779a25883130dbffba9b4c0d0fe656bf809b701729} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=b8a0f1fe5ec6edb6b6a5d6779a25883130dbffba9b4c0d0fe656bf809b701729} value=1 ]} {Instance:instance=10.100.46.153:10250, name=bb652fbd3c4b373f97d4e6ad1659bd1b69e1abc46dbfbc2c9a8b552cf78677cb State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=bb652fbd3c4b373f97d4e6ad1659bd1b69e1abc46dbfbc2c9a8b552cf78677cb Value:0xc0026a9c20} C:{Var:C Labels:instance=10.100.46.153:10250, name=bb652fbd3c4b373f97d4e6ad1659bd1b69e1abc46dbfbc2c9a8b552cf78677cb Value:0xc0026a9c68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570994s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=bb652fbd3c4b373f97d4e6ad1659bd1b69e1abc46dbfbc2c9a8b552cf78677cb} value=4.8980833333341536e-05 ], [ var='C' labels={instance=10.100.46.153:10250, name=bb652fbd3c4b373f97d4e6ad1659bd1b69e1abc46dbfbc2c9a8b552cf78677cb} value=1 ]} {Instance:instance=10.100.46.153:10250, name=be6cb1191384c94d811ae2d33f13bd80e65a22bb291dd004b81f04ffdac370db State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=be6cb1191384c94d811ae2d33f13bd80e65a22bb291dd004b81f04ffdac370db Value:0xc0026a9f00} C:{Var:C Labels:instance=10.100.46.153:10250, name=be6cb1191384c94d811ae2d33f13bd80e65a22bb291dd004b81f04ffdac370db Value:0xc0026a9d18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.570999822s 
EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=be6cb1191384c94d811ae2d33f13bd80e65a22bb291dd004b81f04ffdac370db} value=0.0817277074999841 ], [ var='C' labels={instance=10.100.46.153:10250, name=be6cb1191384c94d811ae2d33f13bd80e65a22bb291dd004b81f04ffdac370db} value=1 ]} {Instance:instance=10.100.46.153:10250, name=d2cb06efa7ba018a9b63f700eb410f45df3bbc570f54b84c4410127b76879c46 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=d2cb06efa7ba018a9b63f700eb410f45df3bbc570f54b84c4410127b76879c46 Value:0xc0026a9fc0} C:{Var:C Labels:instance=10.100.46.153:10250, name=d2cb06efa7ba018a9b63f700eb410f45df3bbc570f54b84c4410127b76879c46 Value:0xc0026a9ff8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571004615s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=d2cb06efa7ba018a9b63f700eb410f45df3bbc570f54b84c4410127b76879c46} value=0.01407291916666627 ], [ var='C' labels={instance=10.100.46.153:10250, name=d2cb06efa7ba018a9b63f700eb410f45df3bbc570f54b84c4410127b76879c46} value=1 ]} {Instance:instance=10.100.46.153:10250, name=d60bfafa139495466191fbe031e6615eaf832761f8595224539e6a9f4eb5eccc State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=d60bfafa139495466191fbe031e6615eaf832761f8595224539e6a9f4eb5eccc Value:0xc001148040} C:{Var:C Labels:instance=10.100.46.153:10250, name=d60bfafa139495466191fbe031e6615eaf832761f8595224539e6a9f4eb5eccc Value:0xc001148028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571008212s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=d60bfafa139495466191fbe031e6615eaf832761f8595224539e6a9f4eb5eccc} value=0.04235917258332241 ], [ var='C' labels={instance=10.100.46.153:10250, name=d60bfafa139495466191fbe031e6615eaf832761f8595224539e6a9f4eb5eccc} value=1 ]} {Instance:instance=10.100.46.153:10250, name=da9dfed20034ae506cc2fb67b840964d762d8655c44a3cd24ccf8a747ea2992f State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=da9dfed20034ae506cc2fb67b840964d762d8655c44a3cd24ccf8a747ea2992f Value:0xc001148088} C:{Var:C Labels:instance=10.100.46.153:10250, name=da9dfed20034ae506cc2fb67b840964d762d8655c44a3cd24ccf8a747ea2992f Value:0xc001148070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571010991s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=da9dfed20034ae506cc2fb67b840964d762d8655c44a3cd24ccf8a747ea2992f} value=0.029623914999964047 ], [ var='C' labels={instance=10.100.46.153:10250, name=da9dfed20034ae506cc2fb67b840964d762d8655c44a3cd24ccf8a747ea2992f} value=1 ]} {Instance:instance=10.100.46.153:10250, name=dbc515bdc661de5387f4cafa7abf939246cb8ac48ca734053c230e675f9a3341 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=dbc515bdc661de5387f4cafa7abf939246cb8ac48ca734053c230e675f9a3341 Value:0xc0011480d0} C:{Var:C Labels:instance=10.100.46.153:10250, name=dbc515bdc661de5387f4cafa7abf939246cb8ac48ca734053c230e675f9a3341 Value:0xc0011480b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571013401s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=dbc515bdc661de5387f4cafa7abf939246cb8ac48ca734053c230e675f9a3341} value=0.0416984474166308 ], [ var='C' labels={instance=10.100.46.153:10250, name=dbc515bdc661de5387f4cafa7abf939246cb8ac48ca734053c230e675f9a3341} value=1 ]} {Instance:instance=10.100.46.153:10250, 
name=e2a4ce6236ab5565909302269a4d2a618464f2051ad233462235ef0fbc68d463 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=e2a4ce6236ab5565909302269a4d2a618464f2051ad233462235ef0fbc68d463 Value:0xc001148100} C:{Var:C Labels:instance=10.100.46.153:10250, name=e2a4ce6236ab5565909302269a4d2a618464f2051ad233462235ef0fbc68d463 Value:0xc001148118}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571016355s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=e2a4ce6236ab5565909302269a4d2a618464f2051ad233462235ef0fbc68d463} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=e2a4ce6236ab5565909302269a4d2a618464f2051ad233462235ef0fbc68d463} value=1 ]} {Instance:instance=10.100.46.153:10250, name=e39694e35e6b69167cebbcaab04ee48bde1a59eef4658f83e23b8689ce0e9b21 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=e39694e35e6b69167cebbcaab04ee48bde1a59eef4658f83e23b8689ce0e9b21 Value:0xc001148160} C:{Var:C Labels:instance=10.100.46.153:10250, name=e39694e35e6b69167cebbcaab04ee48bde1a59eef4658f83e23b8689ce0e9b21 Value:0xc001148148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571019607s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=e39694e35e6b69167cebbcaab04ee48bde1a59eef4658f83e23b8689ce0e9b21} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=e39694e35e6b69167cebbcaab04ee48bde1a59eef4658f83e23b8689ce0e9b21} value=1 ]} {Instance:instance=10.100.46.153:10250, name=e624f500f3b430cd6441c7562fbde3809b7946c19ddba6c4d3f87b127bdac85b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=e624f500f3b430cd6441c7562fbde3809b7946c19ddba6c4d3f87b127bdac85b Value:0xc0011481a0} C:{Var:C Labels:instance=10.100.46.153:10250, name=e624f500f3b430cd6441c7562fbde3809b7946c19ddba6c4d3f87b127bdac85b Value:0xc0011481b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571022454s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=e624f500f3b430cd6441c7562fbde3809b7946c19ddba6c4d3f87b127bdac85b} value=0.015557899999976144 ], [ var='C' labels={instance=10.100.46.153:10250, name=e624f500f3b430cd6441c7562fbde3809b7946c19ddba6c4d3f87b127bdac85b} value=1 ]} {Instance:instance=10.100.46.153:10250, name=eb420a1a0cbe72b7a490adfb4bb5ea924911ec80986927918a376a279d6241b3 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=eb420a1a0cbe72b7a490adfb4bb5ea924911ec80986927918a376a279d6241b3 Value:0xc0011481e8} C:{Var:C Labels:instance=10.100.46.153:10250, name=eb420a1a0cbe72b7a490adfb4bb5ea924911ec80986927918a376a279d6241b3 Value:0xc001148210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571025129s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=eb420a1a0cbe72b7a490adfb4bb5ea924911ec80986927918a376a279d6241b3} value=0.009546280833333901 ], [ var='C' labels={instance=10.100.46.153:10250, name=eb420a1a0cbe72b7a490adfb4bb5ea924911ec80986927918a376a279d6241b3} value=1 ]} {Instance:instance=10.100.46.153:10250, name=f879bc3e2b1772ee18a99f91c3e81d3e32162ccf892af3cee0f0b04e71a10004 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=f879bc3e2b1772ee18a99f91c3e81d3e32162ccf892af3cee0f0b04e71a10004 Value:0xc001148240} C:{Var:C Labels:instance=10.100.46.153:10250, name=f879bc3e2b1772ee18a99f91c3e81d3e32162ccf892af3cee0f0b04e71a10004 Value:0xc001148258}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571027824s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=f879bc3e2b1772ee18a99f91c3e81d3e32162ccf892af3cee0f0b04e71a10004} value=0.6518314207499998 ], [ var='C' labels={instance=10.100.46.153:10250, name=f879bc3e2b1772ee18a99f91c3e81d3e32162ccf892af3cee0f0b04e71a10004} value=1 ]} {Instance:instance=10.100.46.153:10250, name=faa2e1b732a16d84399468b6bb125784a60fd8b89528cc6ffa138932819fb6ac State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=faa2e1b732a16d84399468b6bb125784a60fd8b89528cc6ffa138932819fb6ac Value:0xc001148288} C:{Var:C Labels:instance=10.100.46.153:10250, name=faa2e1b732a16d84399468b6bb125784a60fd8b89528cc6ffa138932819fb6ac Value:0xc0011482a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571031262s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=faa2e1b732a16d84399468b6bb125784a60fd8b89528cc6ffa138932819fb6ac} value=0.06795542249999897 ], [ var='C' labels={instance=10.100.46.153:10250, name=faa2e1b732a16d84399468b6bb125784a60fd8b89528cc6ffa138932819fb6ac} value=1 ]} {Instance:instance=10.100.46.153:10250, name=fbdc16899ca3e87cf3d9c834ce9076259a25ef43d570b85cac91fb034160e61a State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=fbdc16899ca3e87cf3d9c834ce9076259a25ef43d570b85cac91fb034160e61a Value:0xc0011482e8} C:{Var:C Labels:instance=10.100.46.153:10250, name=fbdc16899ca3e87cf3d9c834ce9076259a25ef43d570b85cac91fb034160e61a Value:0xc0011482d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571039413s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=fbdc16899ca3e87cf3d9c834ce9076259a25ef43d570b85cac91fb034160e61a} value=0.021531878333291843 ], [ var='C' labels={instance=10.100.46.153:10250, name=fbdc16899ca3e87cf3d9c834ce9076259a25ef43d570b85cac91fb034160e61a} value=1 ]} {Instance:instance=10.100.46.153:10250, name=fc48a567482b88fd136bc3ebdbfde0952f2d045232355baf587a76c7ea5ec188 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=fc48a567482b88fd136bc3ebdbfde0952f2d045232355baf587a76c7ea5ec188 Value:0xc001148340} C:{Var:C Labels:instance=10.100.46.153:10250, name=fc48a567482b88fd136bc3ebdbfde0952f2d045232355baf587a76c7ea5ec188 Value:0xc001148328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571042442s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=fc48a567482b88fd136bc3ebdbfde0952f2d045232355baf587a76c7ea5ec188} value=0.010656865833333917 ], [ var='C' labels={instance=10.100.46.153:10250, name=fc48a567482b88fd136bc3ebdbfde0952f2d045232355baf587a76c7ea5ec188} value=1 ]} {Instance:instance=10.100.46.153:10250, name=fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.46.153:10250, name=fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3 Value:0xc001148388} C:{Var:C Labels:instance=10.100.46.153:10250, name=fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3 Value:0xc001148370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571044711s EvaluationString:[ var='B' labels={instance=10.100.46.153:10250, name=fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3} value=0 ], [ var='C' labels={instance=10.100.46.153:10250, name=fdc729c91bc9a1c590739a74afb6338539f3a1891cb08ce37a86edd31febbda3} value=1 ]} 
[Grafana alert evaluation state dump: one `State:Alerting` entry per container, all evaluated at 2024-05-29 13:44:10 UTC with evaluation durations of roughly 4.571s, covering the kubelet endpoints 10.100.46.71:10250, 10.100.48.211:10250, and 10.100.50.226:10250. Each entry reports two reduced expressions for its `{instance, name}` label set: `var='B'`, the measured per-container value (ranging here from 0 up to about 11.66, with the largest readings on containers 402063e6… at 11.664837 and b180f07b… at 11.245027), and `var='C'`, the threshold condition, which is `value=1` (firing) for every series listed.]
{Instance:instance=10.100.50.226:10250, name=515848d38dcad782abc619597f27b7a6cbc918af349bfb7f2010e5018f5829d0 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=515848d38dcad782abc619597f27b7a6cbc918af349bfb7f2010e5018f5829d0 Value:0xc01a15c3d8} C:{Var:C Labels:instance=10.100.50.226:10250, name=515848d38dcad782abc619597f27b7a6cbc918af349bfb7f2010e5018f5829d0 Value:0xc01a15c3f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571339041s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=515848d38dcad782abc619597f27b7a6cbc918af349bfb7f2010e5018f5829d0} value=0.020798172777745094 ], [ var='C' labels={instance=10.100.50.226:10250, name=515848d38dcad782abc619597f27b7a6cbc918af349bfb7f2010e5018f5829d0} value=1 ]} {Instance:instance=10.100.50.226:10250, name=527fb591516c78bc6080c7015b22e438a2af7ae69fe31b0a06a8c775ca9363e6 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=527fb591516c78bc6080c7015b22e438a2af7ae69fe31b0a06a8c775ca9363e6 Value:0xc01a15c420} C:{Var:C Labels:instance=10.100.50.226:10250, name=527fb591516c78bc6080c7015b22e438a2af7ae69fe31b0a06a8c775ca9363e6 Value:0xc01a15c438}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57134149s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=527fb591516c78bc6080c7015b22e438a2af7ae69fe31b0a06a8c775ca9363e6} value=0.2783619755000346 ], [ var='C' labels={instance=10.100.50.226:10250, name=527fb591516c78bc6080c7015b22e438a2af7ae69fe31b0a06a8c775ca9363e6} value=1 ]} {Instance:instance=10.100.50.226:10250, name=54fa2546f3349c1824839f9f5b93658ea3150c986311391c1f5504097241756c State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=54fa2546f3349c1824839f9f5b93658ea3150c986311391c1f5504097241756c Value:0xc01a15c490} C:{Var:C Labels:instance=10.100.50.226:10250, name=54fa2546f3349c1824839f9f5b93658ea3150c986311391c1f5504097241756c Value:0xc01a15c468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571344274s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=54fa2546f3349c1824839f9f5b93658ea3150c986311391c1f5504097241756c} value=0.03789399444435225 ], [ var='C' labels={instance=10.100.50.226:10250, name=54fa2546f3349c1824839f9f5b93658ea3150c986311391c1f5504097241756c} value=1 ]} {Instance:instance=10.100.50.226:10250, name=55a96b44e3aeca202cc81612c4e9720496d842b84bf2cda2a600ebb91c51550b State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=55a96b44e3aeca202cc81612c4e9720496d842b84bf2cda2a600ebb91c51550b Value:0xc01a15c4c0} C:{Var:C Labels:instance=10.100.50.226:10250, name=55a96b44e3aeca202cc81612c4e9720496d842b84bf2cda2a600ebb91c51550b Value:0xc01a15c4d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571347228s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=55a96b44e3aeca202cc81612c4e9720496d842b84bf2cda2a600ebb91c51550b} value=0.0022351011111096694 ], [ var='C' labels={instance=10.100.50.226:10250, name=55a96b44e3aeca202cc81612c4e9720496d842b84bf2cda2a600ebb91c51550b} value=1 ]} {Instance:instance=10.100.50.226:10250, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df Value:0xc01a15c508} C:{Var:C Labels:instance=10.100.50.226:10250, 
name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df Value:0xc01a15c520}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571349419s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=580da83e84573fad047272924926406626719caa62832bcd65ce0b314776d2df} value=1 ]} {Instance:instance=10.100.50.226:10250, name=5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1 Value:0xc01a15c550} C:{Var:C Labels:instance=10.100.50.226:10250, name=5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1 Value:0xc01a15c568}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571352004s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1} value=0.0036790799999961462 ], [ var='C' labels={instance=10.100.50.226:10250, name=5885ae00d0796b4340e95184c16f52f8abe5bd7e0e6e006576f56c32697507d1} value=1 ]} {Instance:instance=10.100.50.226:10250, name=60ce9ad166af5d937f10f0731bebaee0bfa8e681c8fba3bcfa1476f624acb381 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=60ce9ad166af5d937f10f0731bebaee0bfa8e681c8fba3bcfa1476f624acb381 Value:0xc01a15c5b0} C:{Var:C Labels:instance=10.100.50.226:10250, name=60ce9ad166af5d937f10f0731bebaee0bfa8e681c8fba3bcfa1476f624acb381 Value:0xc01a15c598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571355007s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=60ce9ad166af5d937f10f0731bebaee0bfa8e681c8fba3bcfa1476f624acb381} value=0.010421430000000016 ], [ var='C' labels={instance=10.100.50.226:10250, name=60ce9ad166af5d937f10f0731bebaee0bfa8e681c8fba3bcfa1476f624acb381} value=1 ]} {Instance:instance=10.100.50.226:10250, name=65c562ebf244dfb4162570658755f4931725039ca158c179191c559d4d64cbae State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=65c562ebf244dfb4162570658755f4931725039ca158c179191c559d4d64cbae Value:0xc01a15c5e0} C:{Var:C Labels:instance=10.100.50.226:10250, name=65c562ebf244dfb4162570658755f4931725039ca158c179191c559d4d64cbae Value:0xc01a15c5f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571357375s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=65c562ebf244dfb4162570658755f4931725039ca158c179191c559d4d64cbae} value=0.04741335333335428 ], [ var='C' labels={instance=10.100.50.226:10250, name=65c562ebf244dfb4162570658755f4931725039ca158c179191c559d4d64cbae} value=1 ]} {Instance:instance=10.100.50.226:10250, name=67ac9dfacd8d983668b6a71b8be4cbe2afea429357f371c7ab497d20aca7e5b0 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=67ac9dfacd8d983668b6a71b8be4cbe2afea429357f371c7ab497d20aca7e5b0 Value:0xc01a15c628} C:{Var:C Labels:instance=10.100.50.226:10250, name=67ac9dfacd8d983668b6a71b8be4cbe2afea429357f371c7ab497d20aca7e5b0 Value:0xc01a15c640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571359574s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=67ac9dfacd8d983668b6a71b8be4cbe2afea429357f371c7ab497d20aca7e5b0} value=0.01858143838887827 ], [ var='C' 
labels={instance=10.100.50.226:10250, name=67ac9dfacd8d983668b6a71b8be4cbe2afea429357f371c7ab497d20aca7e5b0} value=1 ]} {Instance:instance=10.100.50.226:10250, name=6c647c200c16370da647edbf0e7032c290ee6b369daac3d70dadc7d0f0e2128a State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=6c647c200c16370da647edbf0e7032c290ee6b369daac3d70dadc7d0f0e2128a Value:0xc01a15c670} C:{Var:C Labels:instance=10.100.50.226:10250, name=6c647c200c16370da647edbf0e7032c290ee6b369daac3d70dadc7d0f0e2128a Value:0xc01a15c688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571362188s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=6c647c200c16370da647edbf0e7032c290ee6b369daac3d70dadc7d0f0e2128a} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=6c647c200c16370da647edbf0e7032c290ee6b369daac3d70dadc7d0f0e2128a} value=1 ]} {Instance:instance=10.100.50.226:10250, name=70b1684f7ca2446a43e5ac14844e7c4a5509bcadede66e5aef8f99c087f62848 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=70b1684f7ca2446a43e5ac14844e7c4a5509bcadede66e5aef8f99c087f62848 Value:0xc01a15c6b8} C:{Var:C Labels:instance=10.100.50.226:10250, name=70b1684f7ca2446a43e5ac14844e7c4a5509bcadede66e5aef8f99c087f62848 Value:0xc01a15c6d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571364455s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=70b1684f7ca2446a43e5ac14844e7c4a5509bcadede66e5aef8f99c087f62848} value=0.020512796666720432 ], [ var='C' labels={instance=10.100.50.226:10250, name=70b1684f7ca2446a43e5ac14844e7c4a5509bcadede66e5aef8f99c087f62848} value=1 ]} {Instance:instance=10.100.50.226:10250, name=7c1d8ec3c21e31df189233b13447234a9a1691c2f6cbbf778e743344b0339daf State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=7c1d8ec3c21e31df189233b13447234a9a1691c2f6cbbf778e743344b0339daf Value:0xc01a15c700} C:{Var:C Labels:instance=10.100.50.226:10250, name=7c1d8ec3c21e31df189233b13447234a9a1691c2f6cbbf778e743344b0339daf Value:0xc01a15c718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571367205s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=7c1d8ec3c21e31df189233b13447234a9a1691c2f6cbbf778e743344b0339daf} value=0.15707514722218244 ], [ var='C' labels={instance=10.100.50.226:10250, name=7c1d8ec3c21e31df189233b13447234a9a1691c2f6cbbf778e743344b0339daf} value=1 ]} {Instance:instance=10.100.50.226:10250, name=7d462c558e8e9b9a25bad82e29a473b8a0e52980eb8047246a78d75c244cd497 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=7d462c558e8e9b9a25bad82e29a473b8a0e52980eb8047246a78d75c244cd497 Value:0xc01a15c768} C:{Var:C Labels:instance=10.100.50.226:10250, name=7d462c558e8e9b9a25bad82e29a473b8a0e52980eb8047246a78d75c244cd497 Value:0xc01a15c790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571369769s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=7d462c558e8e9b9a25bad82e29a473b8a0e52980eb8047246a78d75c244cd497} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=7d462c558e8e9b9a25bad82e29a473b8a0e52980eb8047246a78d75c244cd497} value=1 ]} {Instance:instance=10.100.50.226:10250, name=7fca5984ae3227a095dc9ac8d546147657cc95e505ddafd9d355ca9858033797 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=7fca5984ae3227a095dc9ac8d546147657cc95e505ddafd9d355ca9858033797 
Value:0xc01a15c7e8} C:{Var:C Labels:instance=10.100.50.226:10250, name=7fca5984ae3227a095dc9ac8d546147657cc95e505ddafd9d355ca9858033797 Value:0xc01a15c7c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571371891s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=7fca5984ae3227a095dc9ac8d546147657cc95e505ddafd9d355ca9858033797} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=7fca5984ae3227a095dc9ac8d546147657cc95e505ddafd9d355ca9858033797} value=1 ]} {Instance:instance=10.100.50.226:10250, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633 Value:0xc01a15c828} C:{Var:C Labels:instance=10.100.50.226:10250, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633 Value:0xc01a15c840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571374026s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633} value=1.9875205777781149 ], [ var='C' labels={instance=10.100.50.226:10250, name=885d59a902cc432dd4850fe963b05a047ce311b6be81c6b52d931046f2bba633} value=1 ]} {Instance:instance=10.100.50.226:10250, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627 Value:0xc01a15c870} C:{Var:C Labels:instance=10.100.50.226:10250, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627 Value:0xc01a15c898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571376397s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627} value=0.011982438333333591 ], [ var='C' labels={instance=10.100.50.226:10250, name=8a1e01b2a72b2e2da5159819c40a6cba279c2bfe4e2c448bf35d5dc1a209d627} value=1 ]} {Instance:instance=10.100.50.226:10250, name=8ef894aa6a38dd76614c053719e6c812662fdd93ad1719eba88bdc06e25b4d89 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=8ef894aa6a38dd76614c053719e6c812662fdd93ad1719eba88bdc06e25b4d89 Value:0xc01a15c8d8} C:{Var:C Labels:instance=10.100.50.226:10250, name=8ef894aa6a38dd76614c053719e6c812662fdd93ad1719eba88bdc06e25b4d89 Value:0xc01a15c8f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571379153s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=8ef894aa6a38dd76614c053719e6c812662fdd93ad1719eba88bdc06e25b4d89} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=8ef894aa6a38dd76614c053719e6c812662fdd93ad1719eba88bdc06e25b4d89} value=1 ]} {Instance:instance=10.100.50.226:10250, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6 Value:0xc01a15c958} C:{Var:C Labels:instance=10.100.50.226:10250, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6 Value:0xc01a15c930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571381911s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6} value=0.20684757222220973 
], [ var='C' labels={instance=10.100.50.226:10250, name=8f53a8ac0b03a19873d35d110dbab532023d681d519f92e4e392a24aef9616f6} value=1 ]} {Instance:instance=10.100.50.226:10250, name=989467794613a0820338b65b2f8b1725ac50190860d7ae09f9233a2cce053064 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=989467794613a0820338b65b2f8b1725ac50190860d7ae09f9233a2cce053064 Value:0xc01a15c988} C:{Var:C Labels:instance=10.100.50.226:10250, name=989467794613a0820338b65b2f8b1725ac50190860d7ae09f9233a2cce053064 Value:0xc01a15c9a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571384321s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=989467794613a0820338b65b2f8b1725ac50190860d7ae09f9233a2cce053064} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=989467794613a0820338b65b2f8b1725ac50190860d7ae09f9233a2cce053064} value=1 ]} {Instance:instance=10.100.50.226:10250, name=9f7fcfba9d6f3357d511e75eb070f1d9420c6efc204544ef0c63478e10f977ec State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=9f7fcfba9d6f3357d511e75eb070f1d9420c6efc204544ef0c63478e10f977ec Value:0xc01a15c9e0} C:{Var:C Labels:instance=10.100.50.226:10250, name=9f7fcfba9d6f3357d511e75eb070f1d9420c6efc204544ef0c63478e10f977ec Value:0xc01a15c9f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571387251s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=9f7fcfba9d6f3357d511e75eb070f1d9420c6efc204544ef0c63478e10f977ec} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=9f7fcfba9d6f3357d511e75eb070f1d9420c6efc204544ef0c63478e10f977ec} value=1 ]} {Instance:instance=10.100.50.226:10250, name=a331d90da71fa43542f58adfa9681763e1d7bdd62497c6d1ef233359a4d67c57 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=a331d90da71fa43542f58adfa9681763e1d7bdd62497c6d1ef233359a4d67c57 Value:0xc01a15ca38} C:{Var:C Labels:instance=10.100.50.226:10250, name=a331d90da71fa43542f58adfa9681763e1d7bdd62497c6d1ef233359a4d67c57 Value:0xc01a15ca50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571389405s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=a331d90da71fa43542f58adfa9681763e1d7bdd62497c6d1ef233359a4d67c57} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=a331d90da71fa43542f58adfa9681763e1d7bdd62497c6d1ef233359a4d67c57} value=1 ]} {Instance:instance=10.100.50.226:10250, name=a34c7dffa8b6170bb8d196a240f4f69288895958faf45621fe006dcc4bcb8b59 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=a34c7dffa8b6170bb8d196a240f4f69288895958faf45621fe006dcc4bcb8b59 Value:0xc01a15caa0} C:{Var:C Labels:instance=10.100.50.226:10250, name=a34c7dffa8b6170bb8d196a240f4f69288895958faf45621fe006dcc4bcb8b59 Value:0xc01a15cab8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571392168s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=a34c7dffa8b6170bb8d196a240f4f69288895958faf45621fe006dcc4bcb8b59} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=a34c7dffa8b6170bb8d196a240f4f69288895958faf45621fe006dcc4bcb8b59} value=1 ]} {Instance:instance=10.100.50.226:10250, name=a3e218a7f9d770845d7c76a2563f55c33458b44b1fe31ecf4b62518e376e4e2a State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=a3e218a7f9d770845d7c76a2563f55c33458b44b1fe31ecf4b62518e376e4e2a Value:0xc01a15cae8} 
C:{Var:C Labels:instance=10.100.50.226:10250, name=a3e218a7f9d770845d7c76a2563f55c33458b44b1fe31ecf4b62518e376e4e2a Value:0xc01a15cb10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571403354s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=a3e218a7f9d770845d7c76a2563f55c33458b44b1fe31ecf4b62518e376e4e2a} value=0.003477246111111122 ], [ var='C' labels={instance=10.100.50.226:10250, name=a3e218a7f9d770845d7c76a2563f55c33458b44b1fe31ecf4b62518e376e4e2a} value=1 ]} {Instance:instance=10.100.50.226:10250, name=a8a95dc728dd98fbb7b70fde1b7ebea1e09cda04c7d6eb56d816bdf626633300 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=a8a95dc728dd98fbb7b70fde1b7ebea1e09cda04c7d6eb56d816bdf626633300 Value:0xc01a15cb50} C:{Var:C Labels:instance=10.100.50.226:10250, name=a8a95dc728dd98fbb7b70fde1b7ebea1e09cda04c7d6eb56d816bdf626633300 Value:0xc01a15cb68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571405678s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=a8a95dc728dd98fbb7b70fde1b7ebea1e09cda04c7d6eb56d816bdf626633300} value=0.07181905549998646 ], [ var='C' labels={instance=10.100.50.226:10250, name=a8a95dc728dd98fbb7b70fde1b7ebea1e09cda04c7d6eb56d816bdf626633300} value=1 ]} {Instance:instance=10.100.50.226:10250, name=a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc Value:0xc01a15cba8} C:{Var:C Labels:instance=10.100.50.226:10250, name=a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc Value:0xc01a15cbd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571408124s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc} value=0.021342306666648863 ], [ var='C' labels={instance=10.100.50.226:10250, name=a9ac4d6f832d3ef6003049cedf46db03a0a5671bcb61892e00861507efd2fafc} value=1 ]} {Instance:instance=10.100.50.226:10250, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873 Value:0xc01a15cc00} C:{Var:C Labels:instance=10.100.50.226:10250, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873 Value:0xc01a15cc18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571410668s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873} value=0.0059347827777855855 ], [ var='C' labels={instance=10.100.50.226:10250, name=a9dd8192cf775a48271256ddd40dbe1825c9860460dfeef8934a19e2bccf5873} value=1 ]} {Instance:instance=10.100.50.226:10250, name=ae5dc4e55f19c8437ae45a011253816923d68250341ec8f98bf159ad74abfd88 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=ae5dc4e55f19c8437ae45a011253816923d68250341ec8f98bf159ad74abfd88 Value:0xc01a15cca8} C:{Var:C Labels:instance=10.100.50.226:10250, name=ae5dc4e55f19c8437ae45a011253816923d68250341ec8f98bf159ad74abfd88 Value:0xc01a15ccd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571413785s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=ae5dc4e55f19c8437ae45a011253816923d68250341ec8f98bf159ad74abfd88} value=0 
], [ var='C' labels={instance=10.100.50.226:10250, name=ae5dc4e55f19c8437ae45a011253816923d68250341ec8f98bf159ad74abfd88} value=1 ]} {Instance:instance=10.100.50.226:10250, name=bc75c69687f7d790cf815bd4ac990eb3c46d4e9b839ebb92fc0799233dc3c664 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=bc75c69687f7d790cf815bd4ac990eb3c46d4e9b839ebb92fc0799233dc3c664 Value:0xc01a15cd20} C:{Var:C Labels:instance=10.100.50.226:10250, name=bc75c69687f7d790cf815bd4ac990eb3c46d4e9b839ebb92fc0799233dc3c664 Value:0xc01a15cd48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.57141644s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=bc75c69687f7d790cf815bd4ac990eb3c46d4e9b839ebb92fc0799233dc3c664} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=bc75c69687f7d790cf815bd4ac990eb3c46d4e9b839ebb92fc0799233dc3c664} value=1 ]} {Instance:instance=10.100.50.226:10250, name=be9b321fa59b70bb50385b1c84518a98f168b1967b578d0f0c68f5283d5eede8 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=be9b321fa59b70bb50385b1c84518a98f168b1967b578d0f0c68f5283d5eede8 Value:0xc01a15cd88} C:{Var:C Labels:instance=10.100.50.226:10250, name=be9b321fa59b70bb50385b1c84518a98f168b1967b578d0f0c68f5283d5eede8 Value:0xc01a15cda0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571418947s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=be9b321fa59b70bb50385b1c84518a98f168b1967b578d0f0c68f5283d5eede8} value=0.003973438944452962 ], [ var='C' labels={instance=10.100.50.226:10250, name=be9b321fa59b70bb50385b1c84518a98f168b1967b578d0f0c68f5283d5eede8} value=1 ]} {Instance:instance=10.100.50.226:10250, name=beaedd1c6b24261bf0de4d99e5f3b846f5ad8e1278000c0e8a77234b16fb8396 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=beaedd1c6b24261bf0de4d99e5f3b846f5ad8e1278000c0e8a77234b16fb8396 Value:0xc01a15cde0} C:{Var:C Labels:instance=10.100.50.226:10250, name=beaedd1c6b24261bf0de4d99e5f3b846f5ad8e1278000c0e8a77234b16fb8396 Value:0xc01a15cdf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571421374s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=beaedd1c6b24261bf0de4d99e5f3b846f5ad8e1278000c0e8a77234b16fb8396} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=beaedd1c6b24261bf0de4d99e5f3b846f5ad8e1278000c0e8a77234b16fb8396} value=1 ]} {Instance:instance=10.100.50.226:10250, name=c81d9adf1ad2fe9b568897ff38119251bfd61404d41f7e6190dfc8dc44a30de8 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=c81d9adf1ad2fe9b568897ff38119251bfd61404d41f7e6190dfc8dc44a30de8 Value:0xc01a15ce38} C:{Var:C Labels:instance=10.100.50.226:10250, name=c81d9adf1ad2fe9b568897ff38119251bfd61404d41f7e6190dfc8dc44a30de8 Value:0xc01a15ce50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571424617s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=c81d9adf1ad2fe9b568897ff38119251bfd61404d41f7e6190dfc8dc44a30de8} value=0.03724142672215041 ], [ var='C' labels={instance=10.100.50.226:10250, name=c81d9adf1ad2fe9b568897ff38119251bfd61404d41f7e6190dfc8dc44a30de8} value=1 ]} {Instance:instance=10.100.50.226:10250, name=c9242427e30cd75cd1a200f9412d87e0e9c2daf7db2b64c4e5cbde4ee71289d1 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, 
name=c9242427e30cd75cd1a200f9412d87e0e9c2daf7db2b64c4e5cbde4ee71289d1 Value:0xc01a15ce90} C:{Var:C Labels:instance=10.100.50.226:10250, name=c9242427e30cd75cd1a200f9412d87e0e9c2daf7db2b64c4e5cbde4ee71289d1 Value:0xc01a15cea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571427258s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=c9242427e30cd75cd1a200f9412d87e0e9c2daf7db2b64c4e5cbde4ee71289d1} value=0.05253421222221277 ], [ var='C' labels={instance=10.100.50.226:10250, name=c9242427e30cd75cd1a200f9412d87e0e9c2daf7db2b64c4e5cbde4ee71289d1} value=1 ]} {Instance:instance=10.100.50.226:10250, name=ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195 Value:0xc01a15cee8} C:{Var:C Labels:instance=10.100.50.226:10250, name=ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195 Value:0xc01a15cf00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571429972s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=ccbb3a5cff0f74814d490a4382fd882ec62539b0eadca3cd2d36093d1ad46195} value=1 ]} {Instance:instance=10.100.50.226:10250, name=de9f76a670fc15b71ef0a5c91a368d70ffbf9e40549218e22d97f337ed90ade3 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=de9f76a670fc15b71ef0a5c91a368d70ffbf9e40549218e22d97f337ed90ade3 Value:0xc01a15cf68} C:{Var:C Labels:instance=10.100.50.226:10250, name=de9f76a670fc15b71ef0a5c91a368d70ffbf9e40549218e22d97f337ed90ade3 Value:0xc01a15cf40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571432394s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=de9f76a670fc15b71ef0a5c91a368d70ffbf9e40549218e22d97f337ed90ade3} value=0.17479548333287515 ], [ var='C' labels={instance=10.100.50.226:10250, name=de9f76a670fc15b71ef0a5c91a368d70ffbf9e40549218e22d97f337ed90ade3} value=1 ]} {Instance:instance=10.100.50.226:10250, name=df74826e5ec518858e9e8b35833f812093bbda80052955f88ff1ffc222c549b6 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=df74826e5ec518858e9e8b35833f812093bbda80052955f88ff1ffc222c549b6 Value:0xc01a15cf98} C:{Var:C Labels:instance=10.100.50.226:10250, name=df74826e5ec518858e9e8b35833f812093bbda80052955f88ff1ffc222c549b6 Value:0xc01a15cfb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571434921s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=df74826e5ec518858e9e8b35833f812093bbda80052955f88ff1ffc222c549b6} value=0.32847902222177555 ], [ var='C' labels={instance=10.100.50.226:10250, name=df74826e5ec518858e9e8b35833f812093bbda80052955f88ff1ffc222c549b6} value=1 ]} {Instance:instance=10.100.50.226:10250, name=dfb35692af2d1bd0bcc6fd0fdb94e8adf0db5f7c0b9f46588a179128422217b6 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=dfb35692af2d1bd0bcc6fd0fdb94e8adf0db5f7c0b9f46588a179128422217b6 Value:0xc01a15cfe0} C:{Var:C Labels:instance=10.100.50.226:10250, name=dfb35692af2d1bd0bcc6fd0fdb94e8adf0db5f7c0b9f46588a179128422217b6 Value:0xc01a15cff8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571437728s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, 
name=dfb35692af2d1bd0bcc6fd0fdb94e8adf0db5f7c0b9f46588a179128422217b6} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=dfb35692af2d1bd0bcc6fd0fdb94e8adf0db5f7c0b9f46588a179128422217b6} value=1 ]} {Instance:instance=10.100.50.226:10250, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0 Value:0xc01a15d040} C:{Var:C Labels:instance=10.100.50.226:10250, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0 Value:0xc01a15d028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571440171s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=e48d9d18b82a48df2ca1df1aeaa1640f888ebceec064e3df1d96f50b7cf434b0} value=1 ]} {Instance:instance=10.100.50.226:10250, name=efb67185bd4922c28215f98b2891af4b1de5678c3e4384fb51ed6621967c5ee3 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=efb67185bd4922c28215f98b2891af4b1de5678c3e4384fb51ed6621967c5ee3 Value:0xc01a15d088} C:{Var:C Labels:instance=10.100.50.226:10250, name=efb67185bd4922c28215f98b2891af4b1de5678c3e4384fb51ed6621967c5ee3 Value:0xc01a15d070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571443523s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=efb67185bd4922c28215f98b2891af4b1de5678c3e4384fb51ed6621967c5ee3} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=efb67185bd4922c28215f98b2891af4b1de5678c3e4384fb51ed6621967c5ee3} value=1 ]} {Instance:instance=10.100.50.226:10250, name=f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db Value:0xc01a15d100} C:{Var:C Labels:instance=10.100.50.226:10250, name=f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db Value:0xc01a15d0e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571446127s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db} value=0.09718335555539347 ], [ var='C' labels={instance=10.100.50.226:10250, name=f85c2cd4953c9cf93e8719420d273362fab722f1a9219efa0900ba6a57c098db} value=1 ]} {Instance:instance=10.100.50.226:10250, name=f9f96230cddc8b1c73cec0f5f1cc69b41cc70dd107f3520959a482f20e47e9a1 State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:instance=10.100.50.226:10250, name=f9f96230cddc8b1c73cec0f5f1cc69b41cc70dd107f3520959a482f20e47e9a1 Value:0xc01a15d148} C:{Var:C Labels:instance=10.100.50.226:10250, name=f9f96230cddc8b1c73cec0f5f1cc69b41cc70dd107f3520959a482f20e47e9a1 Value:0xc01a15d130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571448803s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=f9f96230cddc8b1c73cec0f5f1cc69b41cc70dd107f3520959a482f20e47e9a1} value=11.155804444440744 ], [ var='C' labels={instance=10.100.50.226:10250, name=f9f96230cddc8b1c73cec0f5f1cc69b41cc70dd107f3520959a482f20e47e9a1} value=1 ]} {Instance:instance=10.100.50.226:10250, name=feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2 State:Alerting Error: Results:map[] Values:map[B:{Var:B 
Labels:instance=10.100.50.226:10250, name=feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2 Value:0xc01a15d190} C:{Var:C Labels:instance=10.100.50.226:10250, name=feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2 Value:0xc01a15d178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.571451562s EvaluationString:[ var='B' labels={instance=10.100.50.226:10250, name=feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2} value=0 ], [ var='C' labels={instance=10.100.50.226:10250, name=feac9bdb527fad1e55b3ba87b9da5feb96da646bc24dbd1751876aea06ff17b2} value=1 ]}]" duration=187.146923ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9ko7ul7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.577199353Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.577162801Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9jmwuyw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.577158153Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr5, exported_orgunit=IPTV, fan=3, host=tr5-ilo.powernet.tv, host_short=tr5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.577124236Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9i1f1c5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576994421Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr5, exported_orgunit=IPTV, fan=1, host=tr5-ilo.powernet.tv, host_short=tr5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.576933033Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-283453laio1use1, cloud_platform=AWS, customer_id=C591, env_id=283453, env_name=C591_Novartis_Prod, env_type=prod, instance=env-283453laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.576886287Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-283453laio1use1, cloud_platform=AWS, customer_id=C591, env_id=283453, env_name=C591_Novartis_Prod, env_type=prod, instance=env-283453laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.576867768Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr5, exported_orgunit=IPTV, fan=0, host=tr5-ilo.powernet.tv, host_short=tr5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.576861032Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=625813 slug=all2energy instance= t=2024-05-29T13:44:14.576819988Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr5, exported_orgunit=IPTV, fan=0, host=tr5-ilo.powernet.tv, host_short=tr5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.576844732Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9gt0sel-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576812129Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9gt0sel-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576774089Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.576726344Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.57672034Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr4, exported_orgunit=IPTV, fan=6, host=tr4-ilo.powernet.tv, host_short=tr4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.57674373Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr4, exported_orgunit=IPTV, fan=5, host=tr4-ilo.powernet.tv, host_short=tr4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=transcoder, type=health" 
t=2024-05-29T13:44:14.576653929Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=134486 slug=podigee instance="hostname=railspodigeecache-green-nbg1-02, instance=5.75.188.152:9121, job=consul_services, service=redis_exporter" t=2024-05-29T13:44:14.576557119Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9dkkx9b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576499186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=134486 slug=podigee instance="hostname=railspodigeecache-green-fsn1-03, instance=49.13.86.231:9121, job=consul_services, service=redis_exporter" t=2024-05-29T13:44:14.576450621Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.576365167Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y9dkkx9b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576408105Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr4, exported_orgunit=IPTV, fan=2, host=tr4-ilo.powernet.tv, host_short=tr4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.576372725Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.576327808Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y99u7324-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576293434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y99u7324-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576252794Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr4, exported_orgunit=IPTV, fan=1, host=tr4-ilo.powernet.tv, host_short=tr4-ilo, 
hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.576273523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-279208laio1westeurope, cloud_platform=Azure, customer_id=A161, env_id=279208, env_name=A161_Thyssenkrup_Prod, env_type=prod, instance=env-279208laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.5762047Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=134486 slug=podigee t=2024-05-29T13:44:14.576176296Z level=debug msg="State manager processing evaluation results" resultCount=3 + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr4, exported_orgunit=IPTV, fan=1, host=tr4-ilo.powernet.tv, host_short=tr4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.576249523Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=134486 slug=podigee version=30 fingerprint=84aec69a48d31886 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.57606808Z level=debug msg="Alert rule evaluated" results="[{Instance:hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter Value:0xc024ddeb30} C:{Var:C Labels:hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter Value:0xc024ddeb78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.575515676s EvaluationString:[ var='A' labels={hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter} value=28.199999999999996 ], [ var='C' labels={hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter} value=0 ]} {Instance:hostname=railspodigeecache-green-fsn1-03, instance=49.13.86.231:9121, job=consul_services, service=redis_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:hostname=railspodigeecache-green-fsn1-03, instance=49.13.86.231:9121, job=consul_services, service=redis_exporter Value:0xc024ddec30} C:{Var:C Labels:hostname=railspodigeecache-green-fsn1-03, instance=49.13.86.231:9121, job=consul_services, service=redis_exporter Value:0xc024ddec78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.575532452s EvaluationString:[ var='A' labels={hostname=railspodigeecache-green-fsn1-03, instance=49.13.86.231:9121, job=consul_services, service=redis_exporter} value=0.8 ], [ var='C' labels={hostname=railspodigeecache-green-fsn1-03, instance=49.13.86.231:9121, job=consul_services, service=redis_exporter} value=0 ]} {Instance:hostname=railspodigeecache-green-nbg1-02, instance=5.75.188.152:9121, job=consul_services, service=redis_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:hostname=railspodigeecache-green-nbg1-02, instance=5.75.188.152:9121, job=consul_services, service=redis_exporter Value:0xc024dded20} C:{Var:C 
Labels:hostname=railspodigeecache-green-nbg1-02, instance=5.75.188.152:9121, job=consul_services, service=redis_exporter Value:0xc024dded58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.575542498s EvaluationString:[ var='A' labels={hostname=railspodigeecache-green-nbg1-02, instance=5.75.188.152:9121, job=consul_services, service=redis_exporter} value=0.8 ], [ var='C' labels={hostname=railspodigeecache-green-nbg1-02, instance=5.75.188.152:9121, job=consul_services, service=redis_exporter} value=0 ]}]" duration=17.005432ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y99u7324-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576174163Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-279208laio1westeurope, cloud_platform=Azure, customer_id=A161, env_id=279208, env_name=A161_Thyssenkrup_Prod, env_type=prod, instance=env-279208laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.576188804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:14.576134148Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:43:00Z next_ends_at=2024-05-29T13:48:00Z + logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:14.576123142Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y982oym1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.576111362Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr3, exported_orgunit=IPTV, fan=6, host=tr3-ilo.powernet.tv, host_short=tr3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.57607752Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-279146laio1eastus, cloud_platform=Azure, customer_id=A160, env_id=279146, env_name=A160 Marc Jacobs PROD, env_type=prod, instance=env-279146laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.575944956Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-y982oym1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575950231Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y971lhtm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57592073Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.575848447Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.575822775Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y971lhtm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575839019Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.575744975Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-278412laio1westeurope, cloud_platform=Azure, customer_id=A113, env_id=278412, env_name=A113 - BAT - Sandbox, env_type=sandbox, instance=env-278412laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.575740037Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:14.575744821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:14.575718029Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:14.575714273Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y96lhqsn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575568017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y90kazsl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575531336Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-275957laiouaenorth, cloud_platform=Azure, customer_id=A159, env_id=275957, env_name=A159_Mubadala_Prod, env_type=prod, instance=env-275957laiouaenorth, job=integrations/node_exporter, region=UAENorth, stage=live" t=2024-05-29T13:44:14.575520373Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-275957laiouaenorth, cloud_platform=Azure, customer_id=A159, env_id=275957, env_name=A159_Mubadala_Prod, env_type=prod, instance=env-275957laiouaenorth, job=integrations/node_exporter, region=UAENorth, stage=live" t=2024-05-29T13:44:14.575482436Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y90kazsl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575426125Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8myzsu8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575388975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8myzsu8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575335274Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.575397935Z caller=remote_instance_store.go:51 user=363785 slug=moonletmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8myzsu8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575305044Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=363785 slug=moonletmonitor instance= t=2024-05-29T13:44:14.57532279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=363785 slug=moonletmonitor t=2024-05-29T13:44:14.575231091Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8mtg96j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.575249933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr2, exported_orgunit=IPTV, fan=4, host=tr2-ilo.powernet.tv, host_short=tr2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.575264708Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-275846laio2use1, cloud_platform=AWS, customer_id=C595, env_id=275846, env_name=C595_Franconnect_Prod, env_type=prod, instance=env-275846laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.575243052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr2, exported_orgunit=IPTV, fan=3, host=tr2-ilo.powernet.tv, host_short=tr2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.575183807Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=758564c1880a264b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.575115697Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.574835859s EvaluationString:}]" duration=165.198277ms + level=debug ts=2024-05-29T13:44:14.575158428Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr2, exported_orgunit=IPTV, fan=2, host=tr2-ilo.powernet.tv, host_short=tr2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.575104605Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr2, exported_orgunit=IPTV, fan=2, host=tr2-ilo.powernet.tv, host_short=tr2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.575091205Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.574979802Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.74603ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.575024029Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.470988ms + level=debug ts=2024-05-29T13:44:14.574912977Z 
caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.574931051Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.297271ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.574882691Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.574872028Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-275843laio1use1, cloud_platform=AWS, customer_id=C595, env_id=275843, env_name=C595_Franconnect_DEV, env_type=dev, instance=env-275843laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.574864307Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=underwriting-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=underwriting-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development" t=2024-05-29T13:44:14.574854438Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-275843laio1use1, cloud_platform=AWS, customer_id=C595, env_id=275843, env_name=C595_Franconnect_DEV, env_type=dev, instance=env-275843laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.574845679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=3e13de2f9d6c1864 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.574765195Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.574517112s EvaluationString:}]" duration=152.762614ms + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.574753316Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="region=eu-west-1, service=kube-state-metrics, stage=development" + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.574704769Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=82292 slug=prosperatech t=2024-05-29T13:44:14.574709113Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.752887ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8g6bokx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574747418Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8g6bokx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574704998Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.574723617Z caller=remote_instance_store.go:51 user=158536 slug=clearsaleantifraude msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8g6bokx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574683027Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr1, exported_orgunit=IPTV, fan=5, host=tr1-ilo.powernet.tv, host_short=tr1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.5747377Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=698963 slug=lemonade version=1 fingerprint=fa2772441214f103 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.574551285Z level=debug msg="Alert rule evaluated" results="[{Instance:app=underwriting-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=underwriting-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=underwriting-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=underwriting-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc02206e240} THRESHOLD:{Var:THRESHOLD 
Labels:app=underwriting-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=underwriting-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc02206e320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.574025661s EvaluationString:[ var='QUERY' labels={app=underwriting-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=underwriting-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=0 ], [ var='THRESHOLD' labels={app=underwriting-platform, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=underwriting-platform, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=0 ]}]" duration=43.608926ms + level=debug ts=2024-05-29T13:44:14.574627428Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8g6bokx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574655017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:14.574651773Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8expulg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574594636Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8expulg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574566246Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8expulg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574524786Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8expulg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574498746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.574466344Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=53.350291ms + logger=ngalert.scheduler user=679831 slug=joveostageaws version=12981 fingerprint=1974cbe203d26db3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.574472596Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.574109258s EvaluationString:}]" duration=35.953482ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8aam27f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574467905Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y8aam27f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574427905Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y89h6lg6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574277873Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.historian backend=loki user=893151 slug=cmtdsnp t=2024-05-29T13:44:14.574243883Z level=debug msg="Done saving alert state history batch" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr1, exported_orgunit=IPTV, fan=1, host=tr1-ilo.powernet.tv, host_short=tr1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.574204492Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.574076671Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-y84e4yac-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.574141832Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=tr1, exported_orgunit=IPTV, fan=0, host=tr1-ilo.powernet.tv, host_short=tr1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=transcoder, type=health" t=2024-05-29T13:44:14.57410659Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream2, exported_orgunit=IPTV, fan=5, host=stbstream2-ilo.powernet.tv, host_short=stbstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.573937687Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y830hkz0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57395534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y830hkz0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57392044Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y830hkz0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573881379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream2, exported_orgunit=IPTV, fan=4, host=stbstream2-ilo.powernet.tv, host_short=stbstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.573869986Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.573762624Z caller=remote_instance_store.go:51 user=548276 slug=relayrobotics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream2, exported_orgunit=IPTV, fan=3, host=stbstream2-ilo.powernet.tv, host_short=stbstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 
Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.573777385Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-274689laio2westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio2westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.573774807Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y82q0pfo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573764308Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.57364218Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7xvrzuj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573574906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7xvrzuj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573522095Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7tkbjju-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573485895Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream2, exported_orgunit=IPTV, fan=1, host=stbstream2-ilo.powernet.tv, host_short=stbstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.573554382Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, 
region=westeurope, stage=live" t=2024-05-29T13:44:14.573595305Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7tkbjju-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573393484Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7tkbjju-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573356644Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7tkbjju-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573331214Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7mbv2qq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573296133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7mbv2qq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573205862Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y7mbv2qq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.573195752Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query" t=2024-05-29T13:44:14.573491734Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, 
customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.573391029Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=6, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.573325478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.573176973Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=4, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.573082674Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.573118584Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y78lliq2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57297963Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=3, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.572979873Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y750rb9a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57294989Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" 
t=2024-05-29T13:44:14.572960706Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.572941656Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y750rb9a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572887019Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y750rb9a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572850589Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.572802131Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=1, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.57278767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y71i1f8x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572797158Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.572757699Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=1, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.57277257Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live" 
t=2024-05-29T13:44:14.572691756Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y71i1f8x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572709067Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y704vr2l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572655677Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y704vr2l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572599976Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y704vr2l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572578786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y6yogmo1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572500025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.572426564Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.572360037Z caller=remote_instance_store.go:51 user=756004 slug=jdsportsprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y6yogmo1-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572373724Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.572337763Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.572298284Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=633335 slug=promqlworkshop instance= t=2024-05-29T13:44:14.572140497Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y6om66uj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572186212Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.572157615Z caller=remote_instance_store.go:51 user=146728 slug=dgc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.57213296Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.57211616Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y6om66uj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.572132471Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=146728 slug=dgc t=2024-05-29T13:44:14.572061529Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.572032658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=146728 
slug=dgc instance="datasource_uid=WqVnnZtMk, ref_id=A" t=2024-05-29T13:44:14.572035519Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=146728 slug=dgc instance="datasource_uid=WqVnnZtMk, ref_id=A" t=2024-05-29T13:44:14.571998911Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y6d4vuii-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571955449Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=146728 slug=dgc instance="datasource_uid=WqVnnZtMk, ref_id=A" t=2024-05-29T13:44:14.57198094Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.57191381Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571913456Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y5xhxoxs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571848568Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y5xhxoxs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571790128Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.571713778Z caller=remote_instance_store.go:51 user=873368 slug=euid msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571741654Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, 
localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571728554Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y53sjy9v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571676417Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571658753Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y53sjy9v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571631166Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.571542325Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y53sjy9v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571533445Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.571546777Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.57125401Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571450449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, 
redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571433449Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.571383931Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=373
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571352848Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health" t=2024-05-29T13:44:14.571339048Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.571264446Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y53gyqfd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571372203Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.571187445Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.571220096Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.571027243Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4m7tti7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571270062Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.571199609Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4m7tti7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571228762Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.570931441Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4hvnvxj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.571125681Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=343745 slug=getharley t=2024-05-29T13:44:14.5709885Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.703455ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4hvnvxj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.5710703Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4hvnvxj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57104721Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4dr62s8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.57101775Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.57087044Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4dr62s8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.570860348Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4ditvcz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.570747127Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4ditvcz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.570681836Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.570782639Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.570713217Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y4ditvcz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.570633686Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.570617937Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.570507735Z caller=remote_instance_store.go:51 user=715708 slug=ggiprod msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.570504692Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=373
+ logger=ngalert.state.manager.persist user=715708 slug=ggiprod t=2024-05-29T13:44:14.570408944Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y48zy9qx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.570386693Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y48zy9qx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.570373193Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=715708 slug=ggiprod t=2024-05-29T13:44:14.570351272Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.570342932Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.570377176Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=18335 slug=semaphore t=2024-05-29T13:44:14.570361219Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=48.793186ms
+ logger=ngalert.state.manager.persist user=26909 slug=designcrowd t=2024-05-29T13:44:14.570246171Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.86009ms
+ level=debug ts=2024-05-29T13:44:14.570147292Z caller=remote_instance_store.go:51 user=446686 slug=coinfx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.57018873Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.570111229Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.570072454Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3x5dh7c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569959799Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.570036028Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.569943895Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.495785ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3x5dh7c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569878448Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3x5dh7c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569796617Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3x5dh7c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569765447Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3x4mulq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569708496Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569948626Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=384712 slug=nearinc t=2024-05-29T13:44:14.569913691Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.474268ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3vi25d6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569521394Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3vi25d6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569441424Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3rrj23h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569385903Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3rrj23h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569357673Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3rrj23h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569325612Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569873725Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.569857191Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3phm4nd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569233942Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.569796754Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3phm4nd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569222021Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3phm4nd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569182381Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3phm4nd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.569155061Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569812824Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3pen0rx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568922768Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3pen0rx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568884348Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.569744056Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.569708765Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3o6hscs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568758157Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3o6hscs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568710286Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.569536136Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3nb2133-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568565575Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.56953752Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw" t=2024-05-29T13:44:14.569519908Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3k38aya-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568337142Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3is8773-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568250811Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569458319Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.569360252Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:14.569390061Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3is8773-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568159931Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y3is8773-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56814861Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-sin,5)) Query" t=2024-05-29T13:44:14.569337652Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569368517Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y39qh6ca-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.568019409Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=0061dc20714d3d04 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.569124218Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-sin,5)) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc08eb75278} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc08eb752a0} Threshold:{Var:Threshold Labels: Value:0xc08eb752a8} compare:{Var:compare Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-sin,5)) Query Value:0xc08eb753f8} sum:{Var:sum Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-sin,5)) Query Value:0xc08eb75450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.56375688s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=1300 ], [ var='Threshold' labels={} value=-35 ], [ var='compare' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-sin,5)) Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-sin,5)) Query} value=0 ]}]" duration=66.293369ms
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569300516Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y38suahc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567970139Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569286416Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=316418 slug=workmotion version=4 fingerprint=ed705af29dfce793 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.569112254Z level=debug msg="Alert rule evaluated" results="[{Instance:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw Value:0xc084668ff0} C:{Var:C Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw Value:0xc084668fb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.568653864s EvaluationString:[ var='B' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw} value=0 ], [ var='C' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw} value=0 ]} {Instance:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww Value:0xc084669060} C:{Var:C Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww Value:0xc084669088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.568667336s EvaluationString:[ var='B' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww} value=0 ], [ var='C' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww} value=0 ]}]" duration=39.647903ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y36e2cqg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567816847Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y36e2cqg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567797777Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.569184036Z caller=remote_instance_store.go:51 user=438761 slug=wasabicloudprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health" t=2024-05-29T13:44:14.569186315Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y36e2cqg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567697036Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2zcwa9v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567647005Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2zcwa9v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567633105Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2zcwa9v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567591695Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.569001363Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=438185 slug=nodeinfra t=2024-05-29T13:44:14.569079186Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=131.009955ms
+ logger=ngalert.state.manager.persist user=438761 slug=wasabicloudprod t=2024-05-29T13:44:14.569089141Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2ycx97n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567465173Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=438761 slug=wasabicloudprod instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.569071747Z level=debug msg="Changing state" previous_state=Normal next_state=NoData previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2ycx97n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567410293Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2ycx97n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567368052Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2ycx97n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567352782Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=438761 slug=wasabicloudprod instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.569056051Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2rkukks-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567258581Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.568987447Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2omwkok-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56714517Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.56896717Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2omwkok-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.567066589Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y2ilhj70-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566977688Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y28vs7gu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566633375Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y1yax06s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566578834Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.568808544Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y1yax06s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566505804Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y1vlx7n3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566422303Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y1vlx7n3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566384412Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health" t=2024-05-29T13:44:14.568713807Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y1ph9sci-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566346402Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=432323 slug=lithic instance="FunctionName=wire-transfer-producer-live" t=2024-05-29T13:44:14.568724764Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.568644517Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.568701785Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:14.568669708Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.568622647Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y1ljc10t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.566234681Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=c414e4493de6cd9d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.568576975Z level=debug msg="Alert rule evaluated" results="[{Instance:FunctionName=wire-transfer-producer-live State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:FunctionName=wire-transfer-producer-live Value:0xc03a0880a0} C:{Var:C Labels:FunctionName=wire-transfer-producer-live Value:0xc03a0880a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.56823855s EvaluationString:[ var='B' labels={FunctionName=wire-transfer-producer-live} value=0 ], [ var='C' labels={FunctionName=wire-transfer-producer-live} value=0 ]}]" duration=54.402314ms
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health" t=2024-05-29T13:44:14.568635506Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.568626409Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y1ljc10t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56617423Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health" t=2024-05-29T13:44:14.568567405Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.568481241Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health" t=2024-05-29T13:44:14.568430503Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=824501 slug=bendingspoons t=2024-05-29T13:44:14.568277653Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.568390165Z caller=remote_instance_store.go:51 user=82372 slug=fout msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health" t=2024-05-29T13:44:14.568286501Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.568254594Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.568223307Z caller=remote_alert_sender.go:94 user=250150 slug=bizagi host=bizagi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.107.6:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e5147ddf-4008-4375-a8f6-a93905323f68 alerts=1
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="method=POST, route=/v1/dise/{deviceId}/profiles" t=2024-05-29T13:44:14.568260548Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="method=GET, route=/v2/devices/{deviceId}" t=2024-05-29T13:44:14.568210157Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health" t=2024-05-29T13:44:14.5682113Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="method=GET, route=/v2/devices/{deviceId}" t=2024-05-29T13:44:14.568199456Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health" t=2024-05-29T13:44:14.5681996Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup2, exported_orgunit=TELE, fan=6, host=backup2-ilo.telecomx.dk, host_short=backup2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.568132599Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup2, exported_orgunit=TELE, fan=6, host=backup2-ilo.telecomx.dk, host_short=backup2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.568117798Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="method=GET, route=/v1/health" t=2024-05-29T13:44:14.568119205Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=253959 slug=roic t=2024-05-29T13:44:14.568087044Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=197.556425ms
+ Error parsing panelUID for alert annotationruleID1370dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.568079974Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=53.195292ms
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="method=GET, route=/v1/dise/sid/{sid}/deviceId/{deviceId}" t=2024-05-29T13:44:14.568059454Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-263191laiowestus, cloud_platform=Azure, customer_id=A139, env_id=263191, env_name=A139 AgReserves PROD, env_type=prod, instance=env-263191laiowestus, job=integrations/node_exporter, region=westus, stage=live" t=2024-05-29T13:44:14.567973802Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="method=DELETE, route=/v1/dise/{deviceId}/device" t=2024-05-29T13:44:14.567974034Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup2, exported_orgunit=TELE, fan=4, host=backup2-ilo.telecomx.dk, host_short=backup2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.567980196Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="method=DELETE, route=/v1/dise/{deviceId}/device" t=2024-05-29T13:44:14.567956893Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.567817258Z caller=remote_instance_store.go:51 user=423441 slug=outgoinc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.567804902Z level=debug msg="State manager processing evaluation results" resultCount=7
+ level=info ts=2024-05-29T13:44:14.567746435Z caller=grafana.go:247 user=90424 slug=westerveltlumber msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=error" groups=154 alerts=0
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup2, exported_orgunit=TELE, fan=1, host=backup2-ilo.telecomx.dk, host_short=backup2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.567752993Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.567678934Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567704144Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ Error parsing panelUID for alert annotationruleID987dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.567705783Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=180.416182ms
+ level=debug ts=2024-05-29T13:44:14.567613485Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567582839Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567358974Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567479948Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup1, exported_orgunit=TELE, fan=5, host=backup1-ilo.telecomx.dk, host_short=backup1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.567524089Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.56749698Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567498476Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567487025Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567359678Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.567418498Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup1, exported_orgunit=TELE, fan=4, host=backup1-ilo.telecomx.dk, host_short=backup1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.567433288Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup1, exported_orgunit=TELE, fan=4, host=backup1-ilo.telecomx.dk, host_short=backup1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.567419088Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=386776 slug=rcsworks t=2024-05-29T13:44:14.567372786Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=386776 slug=rcsworks instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.567336127Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup1, exported_orgunit=TELE, fan=3, host=backup1-ilo.telecomx.dk, host_short=backup1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.567318886Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-262058laioeastus2, cloud_platform=Azure, customer_id=A136, env_id=262058, env_name=A136 Coughlan PROD, env_type=dev, instance=env-262058laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.567204574Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.567185115Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.567054248Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=8c9785ac9e9e3429 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.566994051Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.566781588s EvaluationString:}]" duration=172.201465ms
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=backup1, exported_orgunit=TELE, fan=0, host=backup1-ilo.telecomx.dk, host_short=backup1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=backup, type=health" t=2024-05-29T13:44:14.567068682Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.566981378Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-261751laiouse1, cloud_platform=AWS, customer_id=C324, env_id=261751, env_name=C324_Marsh_DEV_2021, env_type=dev, instance=env-261751laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.567006858Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-261091laiouse1, cloud_platform=AWS, customer_id=C333, env_id=261091, env_name=c333_Mercer_QA_2021, env_type=qa, instance=env-261091laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.566735446Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=93999 slug=mereo t=2024-05-29T13:44:14.566717543Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive6, exported_orgunit=IPTV, fan=2, host=archive6-ilo.powernet.tv, host_short=archive6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.566716177Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.566697719Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-259828laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=259828, env_name=A135_Roland_Prod, env_type=prod, instance=env-259828laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.566180925Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive5, exported_orgunit=IPTV, fan=1, host=archive5-ilo.powernet.tv, host_short=archive5-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.566136368Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=615073 slug=origence t=2024-05-29T13:44:14.566080544Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.144343ms
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive5, exported_orgunit=IPTV, fan=1, host=archive5-ilo.powernet.tv, host_short=archive5-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.566123268Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.566090212Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive5, exported_orgunit=IPTV, fan=0, host=archive5-ilo.powernet.tv, host_short=archive5-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.566059067Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.566043475Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.561478634Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.56601357Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.557928274Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=dbc36c29222f7ed4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.565954246Z level=error msg="Failed to evaluate rule" error="failed
to build query 'D': data source not found" duration=4.965919ms + level=error ts=2024-05-29T13:44:14.565905561Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'D': data source not found" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.565938444Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive2, exported_orgunit=IPTV, fan=4, host=archive2-ilo.powernet.tv, host_short=archive2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565899064Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive2, exported_orgunit=IPTV, fan=3, host=archive2-ilo.powernet.tv, host_short=archive2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565806263Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:14.565712899Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive2, exported_orgunit=IPTV, fan=2, host=archive2-ilo.powernet.tv, host_short=archive2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565731162Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y19ntn6u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.565727246Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y19ntn6u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.565694485Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.565643873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive2, exported_orgunit=IPTV, fan=1, host=archive2-ilo.powernet.tv, host_short=archive2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565660461Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=6f7f05c87727838d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.565529893Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.565290815s EvaluationString:}]" duration=192.281392ms + logger=ngalert.state.manager.persist user=456850 slug=juniz t=2024-05-29T13:44:14.56561278Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=456850 slug=juniz instance="datasource_uid=f7369e7e-cc4e-4ad3-b666-dc932c9595de, ref_id=A" t=2024-05-29T13:44:14.565593095Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive2, exported_orgunit=IPTV, fan=0, host=archive2-ilo.powernet.tv, host_short=archive2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565566159Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y11u54yr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.565564434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=456850 slug=juniz version=9 fingerprint=54a4fd64093d1556 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.56538695Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=f7369e7e-cc4e-4ad3-b666-dc932c9595de, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.564894391s EvaluationString:}]" duration=58.773894ms + logger=ngalert.state.manager.persist user=691855 slug=chainlake t=2024-05-29T13:44:14.565157777Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.565440163Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.565382017Z caller=remote_instance_store.go:51 user=399183 slug=guidion msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.565350342Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565360256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, 
redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565274155Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.565298596Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y10qyepj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.565319661Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.565261555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=399183 slug=guidion instance="gui_utl__Source__c=data-eraser" t=2024-05-29T13:44:14.565270183Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y10qyepj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.565267771Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID374dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:14.565271388Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.535823ms + level=debug ts=2024-05-29T13:44:14.565262226Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y10qyepj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56519328Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=399183 slug=guidion t=2024-05-29T13:44:14.565211405Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.565170829Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.565082916Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=399183 slug=guidion version=114 fingerprint=b11917982c6aaec0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.565103463Z level=debug msg="Alert rule evaluated" 
results="[{Instance:gui_utl__Source__c=data-eraser State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:gui_utl__Source__c=data-eraser Value:0xc100fc61a8} B:{Var:B Labels:gui_utl__Source__c=data-eraser Value:0xc100fc61d0} C:{Var:C Labels:gui_utl__Source__c=data-eraser Value:0xc100fc61d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.564460319s EvaluationString:[ var='A' labels={gui_utl__Source__c=data-eraser} value=12 ], [ var='B' labels={gui_utl__Source__c=data-eraser} value=12 ], [ var='C' labels={gui_utl__Source__c=data-eraser} value=0 ]}]" duration=95.214075ms + level=info ts=2024-05-29T13:44:14.565107328Z caller=remote_alert_sender.go:94 user=391577 slug=daghouse host=daghouse-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.248.150:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=QVPvQZhVk alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y104f8aa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56515009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.565106767Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.280533ms + level=debug ts=2024-05-29T13:44:14.565011841Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.565079237Z caller=remote_alert_sender.go:94 user=391577 slug=daghouse host=daghouse-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.33.88:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=QVPvQZhVk alerts=1 + logger=ngalert.state.manager user=691855 slug=chainlake instance="instance=compute-hel-9-cpx31-compute-hel-9" t=2024-05-29T13:44:14.565040722Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.564932781Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health" t=2024-05-29T13:44:14.56498955Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.564896843Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y104f8aa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.564980488Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.564850788Z caller=remote_instance_store.go:51 user=813270 slug=adiante msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.564857903Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.564857419Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-258482laio2use1, cloud_platform=AWS, customer_id=C447, env_id=258482, env_name=c447_VSI_PROD_2021U2, env_type=prod, instance=env-258482laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.564812683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health" t=2024-05-29T13:44:14.564764147Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0yco9fn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.564744065Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health" t=2024-05-29T13:44:14.564509043Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0ujuoo9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.564474433Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0ujuoo9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.564440632Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0ujuoo9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.564308461Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0thnmsb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56424409Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-258029laiouse1, cloud_platform=AWS, customer_id=C447, env_id=258029, env_name=C447_VSI_DEV_2021U2, env_type=dev, instance=env-258029laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.56427926Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-258029laiouse1, cloud_platform=AWS, customer_id=C447, env_id=258029, env_name=C447_VSI_DEV_2021U2, env_type=dev, instance=env-258029laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.5642564Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=714575 slug=bonduelleprod t=2024-05-29T13:44:14.564156816Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=6.607983ms + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health" t=2024-05-29T13:44:14.564173638Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health" t=2024-05-29T13:44:14.564103437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0thnmsb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.564101659Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health" t=2024-05-29T13:44:14.564021736Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0h347xd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563974928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health" t=2024-05-29T13:44:14.563944534Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.563892042Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0fdxta3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563866857Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0fdxta3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563852556Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0fdxta3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563815266Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.563863333Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-255971laio2use1, cloud_platform=AWS, customer_id=C448, env_id=255971, env_name=C448_Lincoln_Prod_RPS, env_type=prod, instance=env-255971laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.563808395Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream2, 
exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.563784832Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y058gwd9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563705785Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.563696429Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.563698231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y058gwd9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563644224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:14.563643253Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y058gwd9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563628244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:14.563635146Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0439hu3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563535163Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0439hu3-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563487033Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.563494027Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.563463363Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y0439hu3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563414332Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.563330746Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.563382326Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.563197223Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-y00397mv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.563033068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.563096821Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.562993857Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.56294604Z caller=remote_instance_store.go:51 user=260796 
slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.562927349Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=486972 slug=payretailers t=2024-05-29T13:44:14.562882309Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=133.544282ms + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.562872418Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.562855218Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health" t=2024-05-29T13:44:14.562652415Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.562729806Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=560336 slug=powernet version=17 fingerprint=1c9d1a93ed1f20ae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.552796764Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=0, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=0, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e202ab0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=0, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e202be8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=0, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e202d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.530937409s EvaluationString:[ var='A' labels={__name__=ilo_fan, 
exported_localname=appstream1, exported_orgunit=IPTV, fan=0, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=0, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=0, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e202f98} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e203090} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e203188}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.530988709s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=1, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=2, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, 
localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=2, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e203548} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=2, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e203358} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=2, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00e203450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531009609s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=2, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=2, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=2, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90018} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90550} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90638}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531028809s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=3, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=4, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=4, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90918} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=4, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc909f0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=4, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90830}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531045909s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=4, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=4, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=4, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, 
host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90bd8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90cc0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc90df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531062409s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=5, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc91060} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc91150} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 
Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc91240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531079008s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream1, exported_orgunit=IPTV, fan=6, host=appstream1-ilo.powernet.tv, host_short=appstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=0, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=0, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc91e70} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=0, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc91430} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=0, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00cc91900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531104308s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=0, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=0, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=0, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, 
type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5360e8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc015e80280} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f536010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531119808s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=1, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f536320} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f536418} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, 
host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5364f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531136208s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=2, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=3, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=3, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5368d8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=3, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5366f0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=3, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5367e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531154108s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=3, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=3, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=3, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, 
instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f536f80} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f537078} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f536e98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531172408s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=4, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f537328} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health 
Value:0xc00f537408} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f537248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531188008s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=5, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5375f0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5376e8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc00f5377d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531204608s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, 
exported_localname=appstream2, exported_orgunit=IPTV, fan=6, host=appstream2-ilo.powernet.tv, host_short=appstream2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00f537d90} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00f537e80} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00f537f78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531270907s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=0, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183c1a8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, 
job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183c2d0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183c980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531292807s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=1, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183cbd0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183cce0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183cdd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531309007s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, 
servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=2, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183d020} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183d120} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183d240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531324407s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=3, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=4, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=4, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183d8b0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=4, 
host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183d9a8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=4, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00183da90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531340307s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=4, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=4, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=4, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=5, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=5, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2ae8f0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=5, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2ae620} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=5, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2ae7c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531358407s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=5, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=5, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, 
instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=5, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=6, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=6, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2aed18} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=6, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2af390} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=6, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2aebd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531375507s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=6, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=6, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream3, exported_orgunit=IPTV, fan=6, host=appstream3-ilo.powernet.tv, host_short=appstream3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2af888} B:{Var:B 
Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2af610} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2af740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531391007s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=2, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=3, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=3, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc00b2afaf0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=3, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec8460} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=3, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec8588}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531407006s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=3, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=3, 
host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=3, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=4, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=4, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec8928} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=4, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec8a78} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=4, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec8810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531422106s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=4, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=4, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=4, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, 
redundant=true, servertype=streamer, type=health Value:0xc01cec9178} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec92b8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec93c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531435906s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=5, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=6, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=6, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec9630} B:{Var:B Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=6, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec9790} C:{Var:C Labels:__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=6, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health Value:0xc01cec98b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531450706s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=6, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='B' 
labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=6, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=appstream5, exported_orgunit=IPTV, fan=6, host=appstream5-ilo.powernet.tv, host_short=appstream5-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=streamer, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc01cec9c38} B:{Var:B Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc01cec9d70} C:{Var:C Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc01cec9af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531469006s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=0, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=archive, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=1, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=1, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 
Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc01cec9fb8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=1, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011062188} C:{Var:C Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=1, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011062318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531486306s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=1, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=1, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=1, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=archive, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=2, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=2, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc0110627a8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=2, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011062578} C:{Var:C Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=2, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011062690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531502206s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=2, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, 
exported_localname=archive1, exported_orgunit=IPTV, fan=2, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=2, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=archive, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc0110630f0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc0110629f8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011062fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531516906s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=3, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=archive, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, 
redundant=false, servertype=archive, type=health Value:0xc011063320} B:{Var:B Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011063528} C:{Var:C Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011063658}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531532506s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=4, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=archive, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=5, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=archive, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=5, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011063a58} B:{Var:B Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=5, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011063b60} C:{Var:C Labels:__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=5, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=archive, type=health Value:0xc011063c68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531548906s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=archive1, exported_orgunit=IPTV, fan=5, host=archive1-ilo.powernet.tv, host_short=archive1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=archive, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=archive1, 
[... truncated Grafana alert evaluation log (condensed from several thousand near-identical entries): per-fan health results for metric ilo_fan on hosts archive1-ilo.powernet.tv, archive2-ilo.powernet.tv, archive5-ilo.powernet.tv, archive6-ilo.powernet.tv, backup1-ilo.telecomx.dk, and backup2-ilo.telecomx.dk (Fan 1 through Fan 7), all scraped via instance=job1.telecomx.dk:9102, job=custom. Every instance reports State:Normal with evaluation values A=1, B=1, C=0, EvaluatedAt 2024-05-29 13:44:10 +0000 UTC, EvaluationDuration approximately 4.53s ...]
Value:0xc0169d47d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532124002s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=backup2, exported_orgunit=TELE, fan=6, host=backup2-ilo.telecomx.dk, host_short=backup2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=backup, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=backup2, exported_orgunit=TELE, fan=6, host=backup2-ilo.telecomx.dk, host_short=backup2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=backup, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=backup2, exported_orgunit=TELE, fan=6, host=backup2-ilo.telecomx.dk, host_short=backup2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=backup, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d4978} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d4e28} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d4f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532141302s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=2, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal 
Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d50a0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5178} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532158102s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=3, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=4, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=4, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d53d8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=4, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d54b8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=4, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532174802s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=4, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' 
labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=4, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=4, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5758} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5828} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532190102s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=5, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5ae0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, 
host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5bb8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5c80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532206201s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db1, exported_orgunit=TELE, fan=6, host=db1-ilo.telecomx.dk, host_short=db1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c10020} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5e40} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc0169d5f28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532223001s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=2, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, 
instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c10350} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c104e8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c10820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532237901s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=3, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=4, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=4, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c10e48} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=4, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c11228} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=4, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, 
servertype=db, type=health Value:0xc009c114a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532253701s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=4, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=4, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=4, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=5, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=5, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c11940} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=5, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c11ab0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=5, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c11bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532269801s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=5, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=5, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=5, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=6, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=6, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc009c11fa8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=6, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc00d7ae108} C:{Var:C Labels:__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=6, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health Value:0xc00d7ae328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532285401s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=6, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=6, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=db6, exported_orgunit=TELE, fan=6, host=db6-ilo.telecomx.dk, host_short=db6-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=db, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7ae5f0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7aecd0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7aeed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532301001s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ 
var='B' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=0, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7af110} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7af220} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7af348}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532316101s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=1, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7af670} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node1, 
exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7af788} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00d7af8a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532330801s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=2, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728110} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157281f0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157282f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532345501s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, 
exported_localname=node1, exported_orgunit=TELE, fan=3, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157284c0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157285a0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532360501s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=4, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=5, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=5, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728840} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=5, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728928} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=5, 
host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728a00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5323857s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=5, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=5, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=5, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=6, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=6, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728d58} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=6, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728b98} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=6, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728c78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5324009s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=6, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=6, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node1, exported_orgunit=TELE, fan=6, host=node1-ilo.telecomx.dk, host_short=node1-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, 
host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157290c0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728f18} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015728fe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5324161s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=0, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729270} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729348} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5324315s EvaluationString:[ var='A' labels={__name__=ilo_fan, 
exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=1, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157295f0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157296c0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0157297a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5324467s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=2, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, 
host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729a60} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729b38} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5324632s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=3, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729ce8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729db8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc015729e90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5324781s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node2, 
exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=4, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00130ece8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00130e090} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00130e1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5324983s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=5, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=6, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=6, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00130fb60} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=6, host=node2-ilo.telecomx.dk, 
host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00130fd10} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=6, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc00130f680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5325143s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=6, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=6, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node2, exported_orgunit=TELE, fan=6, host=node2-ilo.telecomx.dk, host_short=node2-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0182b0de0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0182b1160} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0182b1ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532528499s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=2, 
host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb6298} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb63b0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb6190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532543799s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=3, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=4, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=4, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb65f8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=4, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb6720} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=4, host=node3-ilo.telecomx.dk, host_short=node3-ilo, 
hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb68a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532559899s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=4, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=4, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=4, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb6be0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb6d00} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb6e28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532573999s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=5, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, 
hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb7240} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb7380} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb70f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532587999s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node3, exported_orgunit=TELE, fan=6, host=node3-ilo.telecomx.dk, host_short=node3-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb76a0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb77d8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb78f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532602299s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, 
host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=2, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb7b40} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb7c80} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc014bb7da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532618199s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=3, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=4, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=4, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, 
instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046ea000} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=4, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046ea110} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=4, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046ea218}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532634599s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=4, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=4, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=4, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046eab38} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046eac70} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046ead90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532649799s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, 
hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=5, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046eafe8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046eb158} C:{Var:C Labels:__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health Value:0xc0046eb270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532664399s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=node4, exported_orgunit=TELE, fan=6, host=node4-ilo.telecomx.dk, host_short=node4-ilo, hotplug=true, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=true, servertype=k8s, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0046eb520} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, 
job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0046eb658} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0046eb790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532678799s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=0, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0046ebb18} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0046ebc18} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0046eb9f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532692198s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=1, 
host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=2, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=2, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e170} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=2, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0046ebe28} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=2, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532708198s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=2, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=2, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=2, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e338} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e428} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay1, 
exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532721598s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=3, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e740} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e838} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2e938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532737398s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=4, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, 
type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=5, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=5, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2eb30} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=5, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2ec10} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=5, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2ed10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532752498s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=5, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=5, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=5, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2eee8} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2efc0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, 
servertype=relay, type=health Value:0xc00ff2f0a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532781398s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay1, exported_orgunit=IPTV, fan=6, host=relay1-ilo.powernet.tv, host_short=relay1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff0390} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00ff2f6c0} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff0168}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532797098s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=0, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, 
job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff0708} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff0918} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff0a68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532813698s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=1, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 2, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=2, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=2, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff0df0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=2, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff0f98} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=2, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff10f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532829198s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay2, 
exported_orgunit=IPTV, fan=2, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=2, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=2, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 3, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff13b0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff1710} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff1858}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532843698s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=3, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 4, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, 
exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff1c28} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc00eff1d90} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0123e0028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532858797s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=4, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 5, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=5, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=5, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0123e0460} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=5, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0123e0600} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=5, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0123e0318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532873497s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=5, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, 
redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=5, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=5, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 6, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=6, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=6, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0123e0b88} B:{Var:B Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=6, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0123e08b8} C:{Var:C Labels:__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=6, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health Value:0xc0123e0a20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532889197s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=6, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='B' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=6, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health} value=1 ], [ var='C' labels={__name__=ilo_fan, exported_localname=relay2, exported_orgunit=IPTV, fan=6, host=relay2-ilo.powernet.tv, host_short=relay2-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 7, orgunit=TELE, redundant=false, servertype=relay, type=health} value=0 ]} {Instance:__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=0, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=0, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 
Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc0123e0eb0} B:{Var:B Labels:__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=0, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc0123e1040} C:{Var:C Labels:__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=0, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer, type=health Value:0xc0123e1198}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532902997s EvaluationString:[ var='A' labels={__name__=ilo_fan, exported_localname=stbstream1, exported_orgunit=IPTV, fan=0, host=stbstream1-ilo.powernet.tv, host_short=stbstream1-ilo, hotplug=false, instance=job1.telecomx.dk:9102, job=custom, localname=Job1 Ilo, name=Fan 1, orgunit=TELE, redundant=false, servertype=streamer
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xzp7wxfg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.562639414Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.historian backend=loki user=190917 slug=d1cx t=2024-05-29T13:44:14.56266691Z level=debug msg="Done saving alert state history batch"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xzf3d3ai-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.562580013Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xzf3d3ai-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.562518473Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xzf3d3ai-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.562459292Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.562416641Z caller=remote_alert_sender.go:94 user=87780 slug=zencloudandhosting host=zencloudandhosting-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.192.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b6b20d2d-1201-4739-8795-f4a33acf1f5a alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xz4xx5e5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.562342051Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xz4xx5e5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56228311Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-255805laioeastus, cloud_platform=Azure, customer_id=A103, env_id=255805, env_name=A103_Allianz_Prod_2021U1, env_type=prod, instance=env-255805laioeastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.562243645Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xz3kd9r6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.562101638Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-255763laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=255763, env_name=A133 Douglas PROD, env_type=Prod, instance=env-255763laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.56197882Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xz1hu028-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.561917926Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-255763laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=255763, env_name=A133 Douglas PROD, env_type=Prod, instance=env-255763laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live" t=2024-05-29T13:44:14.561935264Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.561801954Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xz0pirw0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.561772915Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-255634laioeastus2, cloud_platform=Azure, customer_id=A129, env_id=255634, env_name=A129_Gap_Dev_2021_east2, env_type=dev, instance=env-255634laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.561724375Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xz0pirw0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.561698324Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xyv2tcno-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.561597963Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=54eead6a4fe38727 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.561497508Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.561272033s EvaluationString:}]" duration=297.089174ms
+ level=debug ts=2024-05-29T13:44:14.561453974Z caller=remote_instance_store.go:51 user=213445 slug=gan msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-253588laio2use1, cloud_platform=AWS, customer_id=C333, env_id=253588, env_name=c333_Mercer_PROD_2021, env_type=prod, instance=env-253588laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.561487124Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xyukq11x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.561356211Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics,
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xytxoi0e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.561208589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xytxoi0e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.561066828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-253100laioeastus2, cloud_platform=Azure, customer_id=A132, env_id=253100, env_name=A132 Capstone DEV, env_type=dev, instance=env-253100laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.561100425Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-253100laioeastus2, cloud_platform=Azure, customer_id=A132, env_id=253100, env_name=A132 Capstone DEV, env_type=dev, instance=env-253100laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.56107708Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xyd0k04q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560942346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xyd0k04q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560871846Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xyd0k04q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560831655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-250989laiousw1, cloud_platform=AWS, customer_id=C537, env_id=250989, env_name=C537_PacSun_PROD2021, env_type=prod, instance=env-250989laiousw1, job=integrations/node_exporter, 
region=us-west-1, stage=live" t=2024-05-29T13:44:14.560846086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-250989laiousw1, cloud_platform=AWS, customer_id=C537, env_id=250989, env_name=C537_PacSun_PROD2021, env_type=prod, instance=env-250989laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.560791558Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xy2khu1e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560739874Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xy2khu1e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560681164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xy2khu1e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560638333Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xy2khu1e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560603143Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.560598413Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.560536732Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxz9ktc3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560500522Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxz9ktc3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560474411Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxqoal5z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.56037157Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxqoal5z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.5603455Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxqoal5z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560274519Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxqoal5z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560215939Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxpr0g1d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560173628Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:14.56014845Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.319731ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxpr0g1d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560087747Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxpr0g1d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.560044947Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxlh45fe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559985366Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:14.559971151Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.857677ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxlh45fe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559957346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-249638laioeastus, cloud_platform=Azure, customer_id=A109, env_id=249638, env_name=A109 Renasant Bank DEV, env_type=dev, instance=env-249638laioeastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:14.559732505Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.559796398Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxhnt8ly-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559769384Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.559594667Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxhnt8ly-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559685173Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:14.559728807Z 
level=info msg="Detected stale state entry" cacheID="[[\"Series\",\"query14a47a8f19f8419a9d0aec8aa9e663b6\"],[\"__alert_rule_namespace_uid__\",\"treewalk2\"],[\"__alert_rule_uid__\",\"e3fae56f-70b3-4c4a-9ae2-81644bb140df\"],[\"alertname\",\"Payment Test UI\"],[\"grafana_folder\",\"treewalk2\"]]" state=Pending reason= + logger=ngalert.state.manager user=371756 slug=asapp instance="Series=queryff18565364f64b9f8cc6c35527bd8545" t=2024-05-29T13:44:14.559651649Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.559613863Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxg8apdg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559558152Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=diff, name=SINGLEPLAYER PS5 Query" t=2024-05-29T13:44:14.559591088Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="Series=queryff18565364f64b9f8cc6c35527bd8545" t=2024-05-29T13:44:14.559484149Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxg8apdg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.5594134Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-249508laiowesteurope, cloud_platform=Azure, customer_id=A113, env_id=249508, env_name=A113_BAT_DEV_2021U1, env_type=dev, instance=env-249508laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.55934874Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.559390153Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=e40824acce3d0580 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.559411239Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=diff, name=SINGLEPLAYER PS5 Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc00271cdf0} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc00271cdf8} Threshold:{Var:Threshold Labels: Value:0xc00271ce60} compare:{Var:compare Labels:aggregatedBy=diff, name=SINGLEPLAYER PS5 Query Value:0xc00271cea0} sum:{Var:sum Labels:aggregatedBy=diff, name=SINGLEPLAYER PS5 Query Value:0xc00271cdd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.558963983s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' 
labels={} value=2000 ], [ var='Threshold' labels={} value=-2.5 ], [ var='compare' labels={aggregatedBy=diff, name=SINGLEPLAYER PS5 Query} value=0 ], [ var='sum' labels={aggregatedBy=diff, name=SINGLEPLAYER PS5 Query} value=0 ]}]" duration=49.37201ms + logger=ngalert.state.manager.persist user=656284 slug=cencosudx t=2024-05-29T13:44:14.559401234Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=149.845005ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxf0q5wl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55938125Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.559384172Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxf0q5wl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559313019Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:14.559324548Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.721016ms + level=debug ts=2024-05-29T13:44:14.559342963Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xxf0q5wl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559284749Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.559195257Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.559117904Z caller=remote_rule_evaluator.go:193 user=134486 slug=podigee msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-249394laio2westeurope, cloud_platform=Azure, customer_id=A113, env_id=249394, env_name=A113_BAT_Prod_2021U1, env_type=prod, instance=env-249394laio2westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.55917983Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xx5wueqo-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559122537Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xx5wueqo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559096007Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xx5wueqo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.559028376Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xx41trak-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558930465Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.558965537Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-249394laio1westeurope, cloud_platform=Azure, customer_id=A113, env_id=249394, env_name=A113_BAT_Prod_2021U1, env_type=prod, instance=env-249394laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:14.558936091Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.558899325Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xx41trak-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558864045Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xx41trak-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558837984Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xx0dty60-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558671753Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-245671laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245671, env_name=A127_DEV_RoundPoint, env_type=dev, instance=env-245671laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.558737128Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-245671laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245671, env_name=A127_DEV_RoundPoint, env_type=dev, instance=env-245671laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.558692459Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.558458914Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwz6vf1n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558543101Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwz6vf1n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558486641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwz6vf1n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55844596Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.558473632Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.558429296Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.558450056Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.558314834Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.558384259Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.55836773Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwvcvfzi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558257409Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwvcvfzi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558232238Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwsxgmwn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558189128Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwsxgmwn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.558100197Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwpecli8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557967176Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwpecli8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557870025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwpecli8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557853574Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xwlquunc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557809394Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:14.557963689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw5gf4nj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557503001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw5gf4nj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557475861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw5gf4nj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55744069Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.557874868Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw32rjyw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55738059Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw32rjyw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557261298Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw32rjyw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557251908Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.557822185Z caller=remote_instance_store.go:51 user=407477 slug=inventa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw2h1voi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557119337Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.557744079Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw0zo40i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557064786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw0zo40i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.557015246Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.557617395Z caller=remote_instance_store.go:51 user=714575 slug=bonduelleprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xw0zo40i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556946885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xvoedpo6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556762983Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xvoedpo6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556716923Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xvoe1a6t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556678212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=714575 slug=bonduelleprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.557526913Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xvoe1a6t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556638232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xvoe1a6t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556628712Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.5575363Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=714575 slug=bonduelleprod version=1 fingerprint=77949084f2ce8662 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.557430322Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A 
State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.556874264s EvaluationString:}]" duration=7.851074ms + level=debug ts=2024-05-29T13:44:14.557497657Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xvjr51wa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55641218Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xvjr51wa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556400579Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xveudyf6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556295698Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xveudyf6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556248538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=430961 slug=solifi version=6 fingerprint=70e9290f9e217949 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.557258875Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.556954587s EvaluationString:}]" duration=108.069492ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xv6rbpl7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556172207Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.557281517Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xv4ftnu0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556066536Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xv4ftnu0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.556017046Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xv4ftnu0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555973815Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xv4ftnu0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555959865Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.557258259Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xv0ucv2g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555922165Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xv0ucv2g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555858754Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xumilwyf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555707292Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.55611324Z 
caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.554341835Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.74.54:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cdlo5ymnnj18ga alerts=1 + level=debug ts=2024-05-29T13:44:14.549691185Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xuirxx1r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555536701Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=788474 slug=elisasre t=2024-05-29T13:44:14.555268124Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.109916ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xuirxx1r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55550493Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xuf76d9m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555400089Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xuf76d9m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555345849Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xuc1cej8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555195677Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtz6pqn8-termination-metadata-pv, 
phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555127766Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtz6pqn8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.555097656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtufusuu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.554983275Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.556761054Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtufusuu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.554736232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:14.556742408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:14.556723294Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=87780 slug=zencloudandhosting t=2024-05-29T13:44:14.556452252Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=101.337127ms + level=debug ts=2024-05-29T13:44:14.556477242Z caller=remote_instance_store.go:51 user=463523 slug=porchatto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=463523 slug=porchatto instance="env=test" t=2024-05-29T13:44:14.55636008Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.556357198Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.55628717Z level=debug msg="Saving alert 
states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.556273488Z caller=remote_instance_store.go:51 user=290313 slug=replit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=463523 slug=porchatto t=2024-05-29T13:44:14.556310701Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=test" + logger=ngalert.state.manager user=250150 slug=bizagi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.556254444Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=cb13d90ab79cdce4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.55615096Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.555907075s EvaluationString:}]" duration=13.743649ms + level=debug ts=2024-05-29T13:44:14.556037823Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:14.55586923Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=463523 slug=porchatto version=5 fingerprint=aa8be567cd07c003 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.555630789Z level=debug msg="Alert rule evaluated" results="[{Instance:env=prod State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:env=prod Value:0xc0726c14a8} C:{Var:C Labels:env=prod Value:0xc0726c1498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.555272715s EvaluationString:[ var='B' labels={env=prod} value=8.275862068965518 ], [ var='C' labels={env=prod} value=0 ]} {Instance:env=test State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:env=test Value:0xc0726c14c8} C:{Var:C Labels:env=test Value:0xc0726c14d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.5552832s EvaluationString:[ var='B' labels={env=test} value=16.99625 ], [ var='C' labels={env=test} value=0 ]}]" duration=14.695645ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:14.55541266Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.555280365Z caller=remote_instance_store.go:51 user=159532 slug=getfabric msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.555232004Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.555215263Z 
caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:14.555054516Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=180994 slug=cgmonitor t=2024-05-29T13:44:14.554646768Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.650868ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtotxif2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.554630271Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live" t=2024-05-29T13:44:14.554447221Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtlyiwlk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55449333Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy t=2024-05-29T13:44:14.554144487Z level=debug msg="State manager processing evaluation results" resultCount=840 + logger=ngalert.scheduler user=412779 slug=microstrategy version=2 fingerprint=12536f9c44b97bae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.518397142Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live Value:0xc01c53d410} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live Value:0xc01c53d510} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live Value:0xc01c53d600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.433222614s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1a, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1a, job=integrations/node_exporter, region=cn-north-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1b, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1b, job=integrations/node_exporter, region=cn-north-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1b, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1b, job=integrations/node_exporter, region=cn-north-1, stage=live Value:0xc01c53d8e8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1b, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1b, job=integrations/node_exporter, region=cn-north-1, stage=live Value:0xc01c53db20} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1b, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1b, job=integrations/node_exporter, region=cn-north-1, stage=live Value:0xc01c53d820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433292533s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1b, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1b, job=integrations/node_exporter, region=cn-north-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1b, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1b, job=integrations/node_exporter, region=cn-north-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=Prod-20240506-1b, cloud_platform=AWS, customer_id=C518, env_type=prod, instance=Prod-20240506-1b, job=integrations/node_exporter, region=cn-north-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01c53de30} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live 
Value:0xc01c53df00} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01c53dfb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433313198s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01eede700} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01eede798} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01eede650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433336538s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238223laio2eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio2eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} 
{Instance:__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01eedea38} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01eede900} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01eede9b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433356089s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live Value:0xc01eedebc0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live Value:0xc01eedeca0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live Value:0xc01eeded70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433376244s EvaluationString:[ var='A' 
labels={__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01eedeec8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01eedef70} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01eedf020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43339288s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, 
env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01eedf1e0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01eedf2a0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01eedf348}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433409476s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-238672laio2use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedf4a0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedf540} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedf5f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433426725s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, 
env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedf8c8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedf768} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedf830}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433445141s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio2northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio2northeurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio2northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio2northeurope, job=integrations/node_exporter, region=northeurope, stage=live 
Value:0xc01eedfa50} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio2northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio2northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedfb20} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-240335laio2northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio2northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedfbd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433459036s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240335laio2northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio2northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240335laio2northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio2northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-240335laio2northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio2northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedfd60} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedfe28} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01eedfee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433477404s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, 
customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02f5b4080} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02f5b41b0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02f5b43d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433491949s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b4678} B:{Var:B Labels:__name__=mstr_status_modeling_service, 
agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b4518} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b45c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433503499s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-241433laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241433, env_name=A124_PartnersHealthcare_Prod_M2021, env_type=prod, instance=env-241433laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241433laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241433, env_name=A124_PartnersHealthcare_Prod_M2021, env_type=prod, instance=env-241433laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b47f8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241433laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241433, env_name=A124_PartnersHealthcare_Prod_M2021, env_type=prod, instance=env-241433laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b4960} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-241433laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241433, env_name=A124_PartnersHealthcare_Prod_M2021, env_type=prod, instance=env-241433laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b4a30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433515414s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241433laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241433, env_name=A124_PartnersHealthcare_Prod_M2021, env_type=prod, instance=env-241433laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241433laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241433, env_name=A124_PartnersHealthcare_Prod_M2021, env_type=prod, instance=env-241433laioeastus2, 
job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-241433laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241433, env_name=A124_PartnersHealthcare_Prod_M2021, env_type=prod, instance=env-241433laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-242641laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242641, env_name=A130_Sumitomo_Dev_M2021, env_type=dev, instance=env-242641laiowestus2, job=integrations/node_exporter, region=westus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-242641laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242641, env_name=A130_Sumitomo_Dev_M2021, env_type=dev, instance=env-242641laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02f5b4d88} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-242641laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242641, env_name=A130_Sumitomo_Dev_M2021, env_type=dev, instance=env-242641laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02f5b4f38} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-242641laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242641, env_name=A130_Sumitomo_Dev_M2021, env_type=dev, instance=env-242641laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02f5b5260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433528118s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-242641laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242641, env_name=A130_Sumitomo_Dev_M2021, env_type=dev, instance=env-242641laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-242641laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242641, env_name=A130_Sumitomo_Dev_M2021, env_type=dev, instance=env-242641laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-242641laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242641, env_name=A130_Sumitomo_Dev_M2021, env_type=dev, instance=env-242641laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02f5b54e0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live 
Value:0xc02f5b5640} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02f5b5778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433540996s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-242642laiowestus2, cloud_platform=Azure, customer_id=A130, env_id=242642, env_name=A130_Sumitomo_PROD_M2021, env_type=prod, instance=env-242642laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-245036laioeastus, cloud_platform=Azure, customer_id=A110, env_id=245036, env_name=A110_Cardinal_Prod_2021, env_type=prod, instance=env-245036laioeastus, job=integrations/node_exporter, region=eastus, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245036laioeastus, cloud_platform=Azure, customer_id=A110, env_id=245036, env_name=A110_Cardinal_Prod_2021, env_type=prod, instance=env-245036laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc02f5b58c0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245036laioeastus, cloud_platform=Azure, customer_id=A110, env_id=245036, env_name=A110_Cardinal_Prod_2021, env_type=prod, instance=env-245036laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc02f5b5950} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245036laioeastus, cloud_platform=Azure, customer_id=A110, env_id=245036, env_name=A110_Cardinal_Prod_2021, env_type=prod, instance=env-245036laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc02f5b5a00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433554806s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-245036laioeastus, cloud_platform=Azure, customer_id=A110, env_id=245036, env_name=A110_Cardinal_Prod_2021, env_type=prod, instance=env-245036laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-245036laioeastus, cloud_platform=Azure, customer_id=A110, env_id=245036, env_name=A110_Cardinal_Prod_2021, env_type=prod, instance=env-245036laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-245036laioeastus, cloud_platform=Azure, customer_id=A110, env_id=245036, env_name=A110_Cardinal_Prod_2021, env_type=prod, instance=env-245036laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=0 
]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b5d90} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b5b48} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b5c40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433567313s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-245644laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245644, env_name=A127_PROD_RoundPoint, env_type=prod, instance=env-245644laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-245671laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245671, env_name=A127_DEV_RoundPoint, env_type=dev, instance=env-245671laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245671laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245671, env_name=A127_DEV_RoundPoint, env_type=dev, instance=env-245671laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02f5b5f68} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245671laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245671, env_name=A127_DEV_RoundPoint, env_type=dev, instance=env-245671laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc023eb8050} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-245671laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245671, env_name=A127_DEV_RoundPoint, env_type=dev, instance=env-245671laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc023eb8720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433579297s 
EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-245671laioeastus2, cloud_platform=Azure, customer_id=A127, env_id=245671, env_name=A127_DEV_RoundPoint, env_type=dev, instance=env-245671laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={…} value=1 ], [ var='C' labels={…} value=0 ]}

Every following record in this evaluation batch has the same shape: metric `__name__=mstr_status_modeling_service`, `job=integrations/node_exporter`, `stage=live`, `State:Normal`, no error, `Results:map[]`, `EvaluatedAt:2024-05-29 13:44:10 +0000 UTC`, and per-series values `A=1`, `B=1`, `C=0`. Only the instance labels and evaluation durations differ:

| instance | cloud_platform | customer_id | env_id | env_name | env_type | region | EvaluationDuration |
|---|---|---|---|---|---|---|---|
| env-249394laio1westeurope | Azure | A113 | 249394 | A113_BAT_Prod_2021U1 | prod | westeurope | 4.433590057s |
| env-249394laio2westeurope | Azure | A113 | 249394 | A113_BAT_Prod_2021U1 | prod | westeurope | 4.43360328s |
| env-249508laiowesteurope | Azure | A113 | 249508 | A113_BAT_DEV_2021U1 | dev | westeurope | 4.433613035s |
| env-249638laioeastus | Azure | A109 | 249638 | A109 Renasant Bank DEV | dev | eastus | 4.433623707s |
| env-249639laio1eastus | Azure | A109 | 249639 | A109 Renasant Bank PROD | prod | eastus | 4.433635792s |
| env-249639laio2eastus | Azure | A109 | 249639 | A109 Renasant Bank PROD | prod | eastus | 4.433646968s |
| env-250989laiousw1 | AWS | C537 | 250989 | C537_PacSun_PROD2021 | prod | us-west-1 | 4.433656736s |
| env-253100laioeastus2 | Azure | A132 | 253100 | A132 Capstone DEV | dev | eastus2 | 4.433666312s |
| env-253588laio1use1 | AWS | C333 | 253588 | c333_Mercer_PROD_2021 | prod | us-east-1 | 4.433677657s |
| env-253588laio2use1 | AWS | C333 | 253588 | c333_Mercer_PROD_2021 | prod | us-east-1 | 4.433686625s |
| env-255634laioeastus2 | Azure | A129 | 255634 | A129_Gap_Dev_2021_east2 | dev | eastus2 | 4.433697175s |
| env-255763laiogermanywestcentral | Azure | A133 | 255763 | A133 Douglas PROD | Prod | GermanyWestCentral | 4.433709441s |
| env-255805laioeastus | Azure | A103 | 255805 | A103_Allianz_Prod_2021U1 | prod | eastus | 4.433720373s |
| env-255826laio1eastus | Azure | A129 | 255826 | A129_Gap_Prod_2021U1 | prod | eastus | 4.433735907s |
| env-255826laio2eastus | Azure | A129 | 255826 | A129_Gap_Prod_2021U1 | prod | eastus | 4.433748723s |
| env-255964laioeastus | Azure | A129 | 255964 | A129_Gap_UAT_2021U1 | test | eastus | 4.433760702s |
| env-255971laio1use1 | AWS | C448 | 255971 | C448_Lincoln_Prod_RPS | prod | us-east-1 | 4.433770924s |
| env-255971laio2use1 | AWS | C448 | 255971 | C448_Lincoln_Prod_RPS | prod | us-east-1 | 4.43378201s |
| env-257197laiousw1 | AWS | C536 | 257197 | C536_SKX_PRD_2021U1 | prod | us-west-1 | 4.433793213s |
| env-258029laiouse1 | AWS | C447 | 258029 | C447_VSI_DEV_2021U2 | dev | us-east-1 | 4.43380427s |
| env-258482laio1use1 | AWS | C447 | 258482 | c447_VSI_PROD_2021U2 | prod | us-east-1 | 4.433816796s |
| env-258482laio2use1 | AWS | C447 | 258482 | c447_VSI_PROD_2021U2 | prod | us-east-1 | 4.433828033s |
| env-259085laio1use1 | AWS | C489 | 259085 | c489_prod_lincoln_2021u2 | prod | us-east-1 | 4.433843867s |
| env-259085laio2use1 | AWS | C489 | 259085 | c489_prod_lincoln_2021u2 | prod | us-east-1 | 4.433860258s |
| env-259446laiouse1 | AWS | C489 | 259446 | c489 Lincoln DEV 2021U2 | dev | us-east-1 | 4.433876535s |
| env-259828laiowesteurope | Azure | A135 | 259828 | A135_Roland_Prod | prod | westeurope | 4.433892785s |
| env-260655laiouse1 | AWS | C333 | 260655 | c333_Mercer_DEV_2021 | dev | us-east-1 | 4.433906941s |
| env-261091laiouse1 | AWS | C333 | 261091 | c333_Mercer_QA_2021 | qa | us-east-1 | 4.433922091s |
| env-261751laiouse1 | AWS | C324 | 261751 | C324_Marsh_DEV_2021 | dev | us-east-1 | 4.433986351s |
| env-262058laioeastus2 | Azure | A136 | 262058 | A136 Coughlan PROD | dev | eastus2 | 4.434004341s |
| env-263173laiosouthcentralus | Azure | A118 | 263173 | A118 Libertad PROD | prod | southcentralus | 4.434019244s |
| env-263178laiowestus | Azure | A139 | 263178 | A139 AgReserves DEV | dev | westus | 4.434035017s |
{Instance:__name__=mstr_status_modeling_service, agent_hostname=env-263191laiowestus, cloud_platform=Azure, customer_id=A139, env_id=263191, env_name=A139 AgReserves PROD, env_type=prod, instance=env-263191laiowestus, job=integrations/node_exporter, region=westus, stage=live State:Normal Error: Results:map[] Values:map[A:{…} B:{…} C:{…}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434054421s EvaluationString:[ var='A' labels={…} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-263191laiowestus, cloud_platform=Azure, customer_id=A139, env_id=263191, env_name=A139 AgReserves PROD, env_type=prod, instance=env-263191laiowestus, job=integrations/node_exporter, region=westus, stage=live}
value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc02c87c7b8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc02c87c898} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc02c87c640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434070031s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc02c87cbb0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc02c87c9f0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc02c87caa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.434083577s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio2use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio2use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc02c87cd68} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio2use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc02c87ce80} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-264639laio2use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc02c87cfd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434100696s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-264639laio2use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-264639laio2use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-264639laio2use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio2use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, 
agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc02c87d470} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc02c87d2a0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc02c87d3c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434115564s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02c87d6e8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02c87d7c8} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc02c87d9e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434131088s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, 
instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc02c87dcc0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc02c87ddb8} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc02c87de80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434146836s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, 
instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc014984020} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc014984110} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc0149841c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434165732s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc014984360} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc014984938} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc014984a10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434183309s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, 
job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc014984f48} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc014984ff8} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc014985870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434197259s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc014985bf0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc014985cc8} C:{Var:C 
Labels:__name__=mstr_status_modeling_service, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc014985b38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434211902s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99a2c8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99a440} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99a500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434227215s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} 
{Instance:__name__=mstr_status_modeling_service, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99a758} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99a810} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99a6a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434245493s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99a988} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99aa78} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99ab38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.434272962s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00b99aca0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00b99ad68} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00b99ae28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434291805s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal 
Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99afd0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99b078} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc00b99b130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434306994s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc00b99b2b0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc00b99b378} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc00b99b4f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434321963s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, 
job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00b99b670} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00b99bcf0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00b99beb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434337048s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, 
instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e140040} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e1400e8} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e1401e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434355761s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e140408} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e1404d0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e1405c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43437398s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, 
cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e140790} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e140868} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e140940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434391188s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-274689laio2westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio2westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-274689laio2westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio2westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01e140b60} B:{Var:B Labels:__name__=mstr_status_modeling_service, 
Truncated excerpt of a Grafana alert-rule state evaluation trace for the metric `mstr_status_modeling_service` (job=`integrations/node_exporter`). Every record visible in the excerpt reports `State:Normal` with empty `Error`, `Results:map[]`, `EvaluatedAt:2024-05-29 13:44:10 +0000 UTC`, an `EvaluationDuration` of roughly 4.434 s, and condition values A=1, B=1, C=0. In every record `agent_hostname` equals `instance`; the raw `Value:0xc...` fields are Go heap pointers with no informational content and are omitted. The per-instance labels are summarized below. The first and last records are cut off at the excerpt boundaries, so the final record's condition values are not recoverable.

| instance (= agent_hostname) | cloud_platform | customer_id | env_id | env_name | env_type | region | stage | notes |
|---|---|---|---|---|---|---|---|---|
| env-274689laio2westeurope | Azure | A101 | 274689 | A101_Jumbo_2021_Prod | prod | westeurope | live | record opens before excerpt |
| env-274836laio1canadacentral | Azure | A108 | 274836 | A108_LCBO_Prod_M2021 | prod | CanadaCentral | live | |
| env-275128laio1use1 | AWS | C591 | 275128 | C591_Novartis_Dev | dev | us-east-1 | live | |
| env-275276laio1canadacentral | Azure | A108 | 275276 | A108_LCBO_Dev_M2021 | dev | CanadaCentral | live | |
| env-275315laio1canadacentral | Azure | A108 | 275315 | A108_LCBO_QA_M2021 | qa | CanadaCentral | live | |
| env-275843laio1use1 | AWS | C595 | 275843 | C595_Franconnect_DEV | dev | us-east-1 | live | |
| env-275846laio1use1 | AWS | C595 | 275846 | C595_Franconnect_Prod | prod | us-east-1 | live | |
| env-275846laio2use1 | AWS | C595 | 275846 | C595_Franconnect_Prod | prod | us-east-1 | live | |
| env-275957laiouaenorth | Azure | A159 | 275957 | A159_Mubadala_Prod | prod | UAENorth | live | |
| env-278412laio1westeurope | Azure | A113 | 278412 | A113 - BAT - Sandbox | sandbox | westeurope | live | |
| env-279146laio1eastus | Azure | A160 | 279146 | A160 Marc Jacobs PROD | prod | eastus | live | |
| env-279208laio1westeurope | Azure | A161 | 279208 | A161_Thyssenkrup_Prod | prod | westeurope | live | |
| env-281950laio1australiaeast | Azure | A145 | 281950 | A145_Bunnings_Dev | dev | australiaeast | live | |
| env-283245laio1centralus | Azure | A151 | 283245 | A151 Digi-Key Dev/Test | dev | centralus | live | |
| env-283453laio1use1 | AWS | C591 | 283453 | C591_Novartis_Prod | prod | us-east-1 | live | |
| env-283453laio2use1 | AWS | C591 | 283453 | C591_Novartis_Prod | prod | us-east-1 | live | |
| env-283466laio1use1 | AWS | C591 | 283466 | C591_Novartis_QA | qa | us-east-1 | live | |
| env-283470laio1apn1 | AWS | C630 | 283470 | C630_TokyoCentury_Prod | prod | ap-northeast-1 | decommission | |
| env-283735laio1eastus | Azure | A128 | 283735 | A128_qvc_Prod | prod | eastus | live | |
| env-283735laio2eastus | Azure | A128 | 283735 | A128_qvc_Prod | prod | eastus | live | |
| env-283878laionortheurope | Azure | A164 | 283878 | A164_CTTI_Corp_PROD | prod | northeurope | live | |
| env-284427laio1use1 | AWS | C595 | 284427 | c595_franconnect_uat_2021 | qa | us-east-1 | live | |
| env-284881laio1centralus | Azure | A151 | 284881 | A151_Digikey_Prod_mstrbak | prod | centralus | live | |
| env-284881laio2centralus | Azure | A151 | 284881 | A151_Digikey_Prod_mstrbak | prod | centralus | live | |
| env-286302laiowesteurope | Azure | A167 | 286302 | A167 Loewe Prod | prod | westeurope | live | |
| env-286767laio1westus2 | Azure | A116 | 286767 | A116_Costco_Dev_2021 | dev | westus2 | live | |
| env-286990laio1westeurope | Azure | A169 | 286990 | A169_Aigues_Prod | prod | westeurope | live | |
| env-287012laio1westus2 | Azure | A116 | 287012 | a116_Costco_Qa_2021 | qa | westus2 | live | |
| env-287969laio1eastus | Azure | A170 | 287969 | a170_ASI_Dev | prod | eastus | live | |
| env-288344laio1westus2 | Azure | A116 | 288344 | A116_Costco_Prod_2021 | prod | westus2 | live | |
| env-288344laio2westus2 | Azure | A116 | 288344 | A116_Costco_Prod_2021 | prod | westus2 | live | |
| env-288831laio1northeurope | Azure | A162 | 288831 | A162_Sonae_Prod | prod | northeurope | live | |
| env-288831laio2northeurope | Azure | A162 | 288831 | A162_Sonae_Prod | prod | northeurope | live | |
| env-288988laio1westeurope | Azure | A172 | 288988 | A172_SCA_DEV | dev | westeurope | live | record cut off at end of excerpt; values not shown |
Labels:__name__=mstr_status_modeling_service, agent_hostname=env-288988laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=288988, env_name=A172_SCA_DEV, env_type=dev, instance=env-288988laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc0190336c0} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-288988laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=288988, env_name=A172_SCA_DEV, env_type=dev, instance=env-288988laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434874689s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-288988laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=288988, env_name=A172_SCA_DEV, env_type=dev, instance=env-288988laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-288988laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=288988, env_name=A172_SCA_DEV, env_type=dev, instance=env-288988laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-288988laio1westeurope, cloud_platform=Azure, customer_id=A172, env_id=288988, env_name=A172_SCA_DEV, env_type=dev, instance=env-288988laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033970} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033a20} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434888145s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, 
region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033d58} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033c20} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033cb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434900884s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc019033ea8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, 
region=westeurope, stage=live Value:0xc019033f88} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00cb474f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43491281s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-289493laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289493, env_name=a148_Carrefour_Dev_2021, env_type=dev, instance=env-289493laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-291882laio1eastus, cloud_platform=Azure, customer_id=A170, env_id=291882, env_name=A170_ASI_Prod, env_type=dev, instance=env-291882laio1eastus, job=integrations/node_exporter, region=eastus, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291882laio1eastus, cloud_platform=Azure, customer_id=A170, env_id=291882, env_name=A170_ASI_Prod, env_type=dev, instance=env-291882laio1eastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc00cb47668} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291882laio1eastus, cloud_platform=Azure, customer_id=A170, env_id=291882, env_name=A170_ASI_Prod, env_type=dev, instance=env-291882laio1eastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc00cb47718} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291882laio1eastus, cloud_platform=Azure, customer_id=A170, env_id=291882, env_name=A170_ASI_Prod, env_type=dev, instance=env-291882laio1eastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc00cb477b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43492374s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291882laio1eastus, cloud_platform=Azure, customer_id=A170, env_id=291882, env_name=A170_ASI_Prod, env_type=dev, instance=env-291882laio1eastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291882laio1eastus, cloud_platform=Azure, customer_id=A170, env_id=291882, env_name=A170_ASI_Prod, env_type=dev, instance=env-291882laio1eastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291882laio1eastus, cloud_platform=Azure, customer_id=A170, env_id=291882, env_name=A170_ASI_Prod, env_type=dev, instance=env-291882laio1eastus, job=integrations/node_exporter, region=eastus, stage=live} 
value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-291902laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=291902, env_name=A165 DWH-MA DEV, env_type=dev, instance=env-291902laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291902laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=291902, env_name=A165 DWH-MA DEV, env_type=dev, instance=env-291902laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc00cb47988} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291902laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=291902, env_name=A165 DWH-MA DEV, env_type=dev, instance=env-291902laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc00cb47b10} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291902laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=291902, env_name=A165 DWH-MA DEV, env_type=dev, instance=env-291902laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc00cb47bf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434954905s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291902laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=291902, env_name=A165 DWH-MA DEV, env_type=dev, instance=env-291902laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291902laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=291902, env_name=A165 DWH-MA DEV, env_type=dev, instance=env-291902laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291902laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=291902, env_name=A165 DWH-MA DEV, env_type=dev, instance=env-291902laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00cb47db8} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00cb47e90} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, 
env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00cb47f50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43497144s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-291924laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=291924, env_name=A169 Aigues Dev, env_type=dev, instance=env-291924laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01b7d8510} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01b7d8380} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01b7d8458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43498796s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, 
region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01b7d8b80} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01b7d8c30} C:{Var:C Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01b7d88b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.435001884s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292236laio2germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio2germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01b7d90d0} B:{Var:B Labels:__name__=mstr_status_modeling_service, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01b7d8dc0} C:{Var:C Labels:__name__=mstr_status_modeling_service, 
agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc01b7d8f40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.435012755s EvaluationString:[ var='A' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_modeling_service, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_modeling_service, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_modeling_service, agent_hostn + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xti0dkc0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.554317148Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xti0dkc0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.554275378Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.554037658Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.554012123Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtg197zd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553883104Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtg197zd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553856343Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.553876277Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.553855155Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.553813697Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xtg197zd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553802493Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.553818389Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.553767478Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.553752976Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.553617495Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.553688905Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.553658307Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=2ae14c9fa194a7ea attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.553620195Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.553296518s EvaluationString:}]" duration=421.273399ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt9hrwik-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553620261Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt6144oy-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553583001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt6144oy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553450549Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.55339507Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt1u8v2a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553371018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt1u8v2a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.553321178Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=343745 slug=getharley t=2024-05-29T13:44:14.553278679Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt0tzsx4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552956174Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt0rgjni-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552865973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt0rgjni-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552834683Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt0rgjni-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552823393Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.552873068Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.552675251Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt085s9a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552678581Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xt085s9a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552666991Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xstnlsak-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55253545Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xss7idvi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552494069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xss7idvi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552472499Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.552447661Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi 
t=2024-05-29T13:44:14.55232306Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.167996ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xss7idvi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552385828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xss7idvi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552363988Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsqnux7j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552332508Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsqnux7j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552283097Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xso1i4pe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552023705Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xso1i4pe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.552001494Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.552005718Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391359 slug=linklogistics instance= t=2024-05-29T13:44:14.551968586Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391359 slug=linklogistics instance= 
t=2024-05-29T13:44:14.551933685Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xso1i4pe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551960034Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=391359 slug=linklogistics version=65 fingerprint=04f5171106611e9e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.551761584Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc02a1046e8} B:{Var:B Labels: Value:0xc02a1046f0} C:{Var:C Labels: Value:0xc02a1046f8} D:{Var:D Labels: Value:0xc02a104700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.550423817s EvaluationString:[ var='A' labels={} value=0 ], [ var='B' labels={} value=0 ], [ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=71.062072ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsnp9dv0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551868573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsnp9dv0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551840363Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsnp9dv0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551791482Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsl9tzbc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551729772Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-xsl9tzbc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551653141Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.551590641Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xshs3sbj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55154588Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.551553355Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.904887ms + level=debug ts=2024-05-29T13:44:14.551492882Z caller=remote_instance_store.go:51 user=85008 slug=kalypsolp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=85008 slug=kalypsolp t=2024-05-29T13:44:14.551453069Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xshs3sbj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551431859Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsgio444-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551378478Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=85008 slug=kalypsolp t=2024-05-29T13:44:14.551351409Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsgio444-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551261387Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=85008 slug=kalypsolp version=12 fingerprint=625fa2476975ee66 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.551224662Z level=debug msg="Alert rule evaluated" results="[{Instance:client=Fluidmaster, environment=Prod, host=twxprod.fluidmaster.kalypso.com, servname=ProdTWXAppServer-9ce253, url=twxprod.fluidmaster.kalypso.com State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:client=Fluidmaster, environment=Prod, host=twxprod.fluidmaster.kalypso.com, servname=ProdTWXAppServer-9ce253, url=twxprod.fluidmaster.kalypso.com Value:0xc05833fbc0} C:{Var:C Labels:client=Fluidmaster, environment=Prod, host=twxprod.fluidmaster.kalypso.com, servname=ProdTWXAppServer-9ce253, url=twxprod.fluidmaster.kalypso.com Value:0xc05833fb80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.550777428s EvaluationString:[ var='A' labels={client=Fluidmaster, environment=Prod, host=twxprod.fluidmaster.kalypso.com, servname=ProdTWXAppServer-9ce253, url=twxprod.fluidmaster.kalypso.com} value=39.16042044144819 ], [ var='C' labels={client=Fluidmaster, environment=Prod, host=twxprod.fluidmaster.kalypso.com, servname=ProdTWXAppServer-9ce253, url=twxprod.fluidmaster.kalypso.com} value=0 ]}]" duration=159.259893ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsfsjwde-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.551119135Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsesheaf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.550906893Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xse8ffes-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.550720771Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.550795455Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.550835719Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.550784541Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.550704898Z caller=remote_instance_store.go:51 user=190917 slug=d1cx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:14.550646035Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:43:00Z next_ends_at=2024-05-29T13:48:00Z
+ logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:14.550629475Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xse8ffes-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.55062114Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xsdp2x85-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.550542299Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.550449676Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.83.87:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c6f1026e-8308-4963-b6d3-b547a30a4307 alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrzzflmu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.550323407Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrzb25fu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.550121045Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.550172615Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.546903369Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.5448446Z caller=remote_instance_store.go:51 user=884866 slug=cnonumerique msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=788474 slug=elisasre t=2024-05-29T13:44:14.545151526Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=788474 slug=elisasre instance="cluster=sre-ci.k8s.local, component=apimgmt-gateway-production, instance=https://10.222.156.157, monitor=monitor-489, namespace=health, region=sdcv3, target=https://10.222.156.157" t=2024-05-29T13:44:14.545135076Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.55005722Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=788474 slug=elisasre instance="cluster=sre-ci.k8s.local, component=apimgmt-gateway-production, instance=https://10.222.156.157, monitor=monitor-489, namespace=health, region=sdcv3, target=https://10.222.156.157" t=2024-05-29T13:44:14.545104275Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=ddhkbrfewv7k0d, ref_id=A" t=2024-05-29T13:44:14.544766577Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=884866 slug=cnonumerique t=2024-05-29T13:44:14.544725297Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=884866 slug=cnonumerique version=79 fingerprint=4cb6b9820c99d379 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.544642646Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddhkbrfewv7k0d, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.544273403s EvaluationString:}]" duration=30.933314ms
+ level=debug ts=2024-05-29T13:44:14.549901953Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrrv84iu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.549843692Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrrv84iu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.549815762Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrl3tub8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.549711681Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrj9r9mo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.549329347Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrj9r9mo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.549278036Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.549298902Z caller=remote_instance_store.go:51 user=158536 slug=clearsaleantifraude msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="collectionname=entity-analysis-configuration" t=2024-05-29T13:44:14.549250523Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="collectionname=entity-analysis-configuration" t=2024-05-29T13:44:14.54924199Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="collectionname=analysisSLA" t=2024-05-29T13:44:14.549183446Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="collectionname=analyses" t=2024-05-29T13:44:14.549158326Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="collectionname=analyses" t=2024-05-29T13:44:14.549133482Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.548895239Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.548895353Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.548805667Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=915065 slug=cmpladpd instance="config_version=1716382794727787520, instance=https://www.pladur.com, job=Browser to pladur.com, probe=Frankfurt" t=2024-05-29T13:44:14.548807664Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.548767379Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=256
+ logger=ngalert.state.manager user=915065 slug=cmpladpd instance="config_version=1716382794727787520, instance=https://www.pladur.com, job=Browser to pladur.com, probe=Frankfurt" t=2024-05-29T13:44:14.548791443Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xrcyb6pi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548746241Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.548696242Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=473762 slug=intentiq t=2024-05-29T13:44:14.548645806Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.548617632Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqxz09qd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548579689Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=517562 slug=microstrategytest t=2024-05-29T13:44:14.548500478Z level=debug msg="Saving alert states done" count=880 max_state_save_concurrency=1 duration=13.371781888s
+ level=debug ts=2024-05-29T13:44:14.548400927Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=335419 slug=tbauctions t=2024-05-29T13:44:14.548419633Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.355509ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqwthm2l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548370887Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqwthm2l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548308037Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=452240 slug=trulioo t=2024-05-29T13:44:14.548193879Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=118.784848ms
+ level=debug ts=2024-05-29T13:44:14.548245694Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqwthm2l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548269996Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=843304 slug=ppcgroup t=2024-05-29T13:44:14.548243394Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.548283494Z caller=remote_instance_store.go:51 user=843304 slug=ppcgroup msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqwthm2l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548242406Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=843304 slug=ppcgroup instance= t=2024-05-29T13:44:14.548219693Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=843304 slug=ppcgroup t=2024-05-29T13:44:14.548170793Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqmjsl0f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548127605Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=843304 slug=ppcgroup version=46 fingerprint=5204a4fe93127230 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.548092792Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.540572526s EvaluationString:}]" duration=13.76631ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqmjsl0f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.548056274Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqgo2ecp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547918423Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqgo2ecp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547857572Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.547726097Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.547736965Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqfa8blz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547736951Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xqbdeu88-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.54768321Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:14.547607401Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.690904ms
+ logger=ngalert.state.manager.persist user=716630 slug=coapdev t=2024-05-29T13:44:14.547477372Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=29.706696ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq9jrzg2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547476508Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.547383292Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.547423532Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ Error parsing panelUID for alert annotationruleID2105dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.547353678Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=129.755699ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq9bxrdg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547272196Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.547125078Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.547108906Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq97q93c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547083164Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq97q93c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547056774Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq97q93c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.547016173Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq8lxtkf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546969563Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.546691786Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.546643665Z caller=remote_instance_store.go:51 user=813270 slug=adiante msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq6g3lhh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546624419Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq6g3lhh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546595939Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xq6g3lhh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546497818Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.546388113Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpy3jbzb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546406557Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpy3jbzb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546335096Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpy3jbzb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546274316Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.546345062Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.12095ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpy3jbzb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546237715Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpth88aj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546167165Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpth88aj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.546098444Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xprxe9lr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545978043Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xprxe9lr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545944712Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xprxe9lr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545824841Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xprxe9lr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545795021Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=612525 slug=adleyeview t=2024-05-29T13:44:14.545729876Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-uri278" t=2024-05-29T13:44:14.545815328Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.545639463Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpd3q18f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545581459Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-sal" t=2024-05-29T13:44:14.545603693Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-product" t=2024-05-29T13:44:14.54552828Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=475170 slug=paypaplane t=2024-05-29T13:44:14.545438815Z level=debug msg="Saving alert states done" count=162 max_state_save_concurrency=1 duration=2.023145516s
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpd3q18f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545347156Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-michael" t=2024-05-29T13:44:14.545354486Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpawhgbb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545308156Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpawhgbb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545247515Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xpawhgbb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545200055Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.545131192Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp9sdy0m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545123254Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp9sdy0m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545098184Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.545070799Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp9sdy0m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545059363Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp9sdy0m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.545022613Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-ivan" t=2024-05-29T13:44:14.545014117Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp8c077y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544928852Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-gali" t=2024-05-29T13:44:14.544922794Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.544807958Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.544866344Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp8c077y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.54479199Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-dog" t=2024-05-29T13:44:14.544860978Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=vmfc-dog" t=2024-05-29T13:44:14.544850003Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=nyc01" t=2024-05-29T13:44:14.544751923Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp3puyiz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.54473439Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.544652994Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.54463804Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=256
+ level=debug ts=2024-05-29T13:44:14.544643974Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=emk01" t=2024-05-29T13:44:14.54450172Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=emk01" t=2024-05-29T13:44:14.544489973Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp3flfxz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544546428Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.544277718Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=159532 slug=getfabric instance="cluster=dal01" t=2024-05-29T13:44:14.54439301Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp3flfxz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544453557Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp3flfxz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544431457Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp15eyt4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544391636Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp15eyt4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544298705Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp11tzs4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544242845Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.544154044Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp11tzs4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544232715Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xp11tzs4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.544163684Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=811546 slug=fyld t=2024-05-29T13:44:14.544272183Z level=debug msg="Saving alert states" count=7 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=811546 slug=fyld instance="QueueName=sitestream-sme-maestro-event-bus" t=2024-05-29T13:44:14.544258213Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=811546 slug=fyld instance="QueueName=sitestream-sme-fyld-brain-event-bus" t=2024-05-29T13:44:14.544233372Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=159532 slug=getfabric t=2024-05-29T13:44:14.544093778Z level=debug msg="State manager processing evaluation results" resultCount=17
+ logger=ngalert.state.manager user=811546 slug=fyld instance="QueueName=celery-notifications" t=2024-05-29T13:44:14.544188301Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391577 slug=daghouse instance= t=2024-05-29T13:44:14.544130939Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=391577 slug=daghouse t=2024-05-29T13:44:14.544080223Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=159532 slug=getfabric version=93 fingerprint=4825fbb28f694d57 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.543822723Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=bs01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=bs01 Value:0xc014061a70} C:{Var:C Labels:cluster=bs01 Value:0xc014061a88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543094344s EvaluationString:[ var='A' labels={cluster=bs01} value=0 ], [ var='C' labels={cluster=bs01} value=0 ]} {Instance:cluster=dal01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=dal01 Value:0xc014061ac8} C:{Var:C Labels:cluster=dal01 Value:0xc014061af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543110467s EvaluationString:[ var='A' labels={cluster=dal01} value=0 ], [ var='C' labels={cluster=dal01} value=0 ]} {Instance:cluster=emk01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=emk01 Value:0xc014061b20} C:{Var:C Labels:cluster=emk01 Value:0xc014061b48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543116896s EvaluationString:[ var='A' labels={cluster=emk01} value=0 ], [ var='C' labels={cluster=emk01} value=0 ]} {Instance:cluster=hln01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=hln01 Value:0xc014061b88} C:{Var:C Labels:cluster=hln01 Value:0xc014061ba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543121839s EvaluationString:[ var='A' labels={cluster=hln01} value=0 ], [ var='C' labels={cluster=hln01} value=0 ]} {Instance:cluster=mfc0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=mfc0 Value:0xc014061bd0} C:{Var:C Labels:cluster=mfc0 Value:0xc014061be8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543126575s EvaluationString:[ var='A' labels={cluster=mfc0} value=0 ], [ var='C' labels={cluster=mfc0} value=0 ]} {Instance:cluster=nyc01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=nyc01 Value:0xc014061c18} C:{Var:C Labels:cluster=nyc01 Value:0xc014061c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.54313133s EvaluationString:[ var='A' labels={cluster=nyc01} value=0 ], [ var='C' labels={cluster=nyc01} value=0 ]} {Instance:cluster=vmfc-dog State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-dog Value:0xc014061c60} C:{Var:C Labels:cluster=vmfc-dog Value:0xc014061c78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.54313852s EvaluationString:[ var='A' labels={cluster=vmfc-dog} value=0 ], [ var='C' labels={cluster=vmfc-dog} value=0 ]} {Instance:cluster=vmfc-gali State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-gali Value:0xc014061cb0} C:{Var:C Labels:cluster=vmfc-gali Value:0xc014061cd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543142766s EvaluationString:[ var='A' labels={cluster=vmfc-gali} value=0 ], [ var='C' labels={cluster=vmfc-gali} value=0 ]} {Instance:cluster=vmfc-ivan State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-ivan Value:0xc014061d00} C:{Var:C Labels:cluster=vmfc-ivan Value:0xc014061d60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543149173s EvaluationString:[ var='A' labels={cluster=vmfc-ivan} value=0 ], [ var='C' labels={cluster=vmfc-ivan} value=0 ]} {Instance:cluster=vmfc-latest State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-latest Value:0xc014061dc0} C:{Var:C Labels:cluster=vmfc-latest Value:0xc014061da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543153384s EvaluationString:[ var='A' labels={cluster=vmfc-latest} value=0 ], [ var='C' labels={cluster=vmfc-latest} value=0 ]} {Instance:cluster=vmfc-maersk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-maersk Value:0xc01c502010} C:{Var:C Labels:cluster=vmfc-maersk Value:0xc01c502030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543161975s EvaluationString:[ var='A' labels={cluster=vmfc-maersk} value=0 ], [ var='C' labels={cluster=vmfc-maersk} value=0 ]} {Instance:cluster=vmfc-michael State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-michael Value:0xc01c502070} C:{Var:C Labels:cluster=vmfc-michael Value:0xc01c502090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543166454s EvaluationString:[ var='A' labels={cluster=vmfc-michael} value=0 ], [ var='C' labels={cluster=vmfc-michael} value=0 ]} {Instance:cluster=vmfc-oz State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-oz Value:0xc01c5020d8} C:{Var:C Labels:cluster=vmfc-oz Value:0xc01c5020c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543170481s EvaluationString:[ var='A' labels={cluster=vmfc-oz} value=0 ], [ var='C' labels={cluster=vmfc-oz} value=0 ]} {Instance:cluster=vmfc-product State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-product Value:0xc01c502108} C:{Var:C Labels:cluster=vmfc-product Value:0xc01c502128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543172968s EvaluationString:[ var='A' labels={cluster=vmfc-product} value=0 ], [ var='C' labels={cluster=vmfc-product} value=0 ]} {Instance:cluster=vmfc-sal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-sal Value:0xc01c502168} C:{Var:C Labels:cluster=vmfc-sal Value:0xc01c502180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543175304s EvaluationString:[ var='A' labels={cluster=vmfc-sal} value=0 ], [ var='C' labels={cluster=vmfc-sal} value=0 ]} {Instance:cluster=vmfc-sivan State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-sivan Value:0xc01c5021a8} C:{Var:C Labels:cluster=vmfc-sivan Value:0xc01c5021c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543178765s EvaluationString:[ var='A' labels={cluster=vmfc-sivan} value=0 ], [ var='C' labels={cluster=vmfc-sivan} value=0 ]} {Instance:cluster=vmfc-uri278 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=vmfc-uri278 Value:0xc01c502208} C:{Var:C Labels:cluster=vmfc-uri278 Value:0xc01c502228}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543181423s EvaluationString:[ var='A' labels={cluster=vmfc-uri278} value=0 ], [ var='C' labels={cluster=vmfc-uri278} value=0 ]}]" duration=20.826101ms
+ logger=ngalert.state.manager user=811546 slug=fyld instance="QueueName=DeadLetters" t=2024-05-29T13:44:14.544085529Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=811546 slug=fyld t=2024-05-29T13:44:14.544047278Z level=debug msg="State manager processing evaluation results" resultCount=7
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.544024111Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xoqzbd1l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.543979202Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xoozacey-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.543937092Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xoozacey-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.543915291Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=245291 slug=pismo version=637 fingerprint=221914251b6e07c9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.543484347Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.543277031s EvaluationString:}]" duration=153.150752ms
+ level=debug ts=2024-05-29T13:44:14.54328976Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xok5wak7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.543137304Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.543036336Z caller=remote_instance_store.go:51 user=691102 slug=deluxeconfdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xohkrsni-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.543065263Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xohkrsni-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.542981922Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.542831347Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xnxluqp8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.542648158Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=622339 slug=lendbr instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.54257403Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=622339 slug=lendbr instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.542566188Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+ logger=ngalert.state.manager user=622339 slug=lendbr instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.54254989Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=622339 slug=lendbr version=5 fingerprint=6132c20b378f2c2a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.542440626Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.541905445s EvaluationString:}]" duration=16.517306ms
+ level=debug ts=2024-05-29T13:44:14.542267936Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xnvflz39-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.542340925Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xnvflz39-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.542310225Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.542284177Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.24045ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xnvflz39-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.542269465Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xnvflz39-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.542244004Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xngsmf7z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.54186249Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xngsmf7z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541811Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xndeyjw9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541653378Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=93308 slug=cede instance="aggregatedBy=diff, environment=campus, name=campus A, service=sms" t=2024-05-29T13:44:14.540676567Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xndeyjw9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541624238Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xndeyjw9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541602838Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xn5dsex1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541563277Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xn5dsex1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541534887Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xn5dsex1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541512757Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.541441177Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.541376144Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:14.541419797Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.541319738Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=bbbd88b6f887f751 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.541224498Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.540820181s EvaluationString:}]" duration=136.120233ms
+ logger=ngalert.scheduler user=119840 slug=manta version=1 fingerprint=ce4d2a6549564291 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.541182724Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.540840861s EvaluationString:}]" duration=16.275312ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xn4a36pp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541242634Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xn4a36pp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541227464Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmyjff52-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.541035262Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.541025473Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmyjff52-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540919131Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmm3shtv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540803919Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmm3shtv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540779159Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmijvt4u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540583947Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.540594742Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmijvt4u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540522486Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.540454096Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.540293572Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmg8kvrq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540394085Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmg8kvrq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n,
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540380425Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xmg8kvrq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.540347235Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede t=2024-05-29T13:44:14.54040501Z level=debug msg="State manager processing evaluation results" resultCount=2 + level=debug ts=2024-05-29T13:44:14.540322785Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=286924 slug=kmpdashboard instance= t=2024-05-29T13:44:14.540359164Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=93308 slug=cede version=41 fingerprint=2a881bc134a176ae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.540302329Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=diff, environment=staging, name=staging A, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:aggregatedBy=diff, environment=staging, name=staging A, service=sms Value:0xc01a5c8400} C:{Var:C Labels:aggregatedBy=diff, environment=staging, name=staging A, service=sms Value:0xc01a5c8460} D:{Var:D Labels:aggregatedBy=diff, environment=staging, name=staging A, service=sms Value:0xc01a5c83a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.539945201s EvaluationString:[ var='B' labels={aggregatedBy=diff, environment=staging, name=staging A, service=sms} value=6.04258320242929 ], [ var='C' labels={aggregatedBy=diff, environment=staging, name=staging A, service=sms} value=0 ], [ var='D' labels={aggregatedBy=diff, environment=staging, name=staging A, service=sms} value=0.0604258320242929 ]} {Instance:aggregatedBy=diff, environment=campus, name=campus A, service=sms State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:aggregatedBy=diff, environment=campus, name=campus A, service=sms Value:0xc01a5c86d0} C:{Var:C Labels:aggregatedBy=diff, environment=campus, name=campus A, service=sms Value:0xc01a5c8730} D:{Var:D Labels:aggregatedBy=diff, environment=campus, name=campus A, service=sms Value:0xc01a5c8608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.539962679s EvaluationString:[ var='B' labels={aggregatedBy=diff, environment=campus, name=campus A, service=sms} value=35.547093322875284 ], [ var='C' labels={aggregatedBy=diff, environment=campus, name=campus A, service=sms} value=1 ], [ var='D' labels={aggregatedBy=diff, environment=campus, name=campus A, service=sms} value=0.35547093322875284 ]}]" duration=27.644607ms + level=debug ts=2024-05-29T13:44:14.540204978Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=286924 slug=kmpdashboard version=25 fingerprint=24d8aeadcf7da930 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.540192714Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.539955496s EvaluationString:}]" duration=210.337686ms + 
+level=debug ts=2024-05-29T13:44:14.540064147Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=78d970be383260dd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.539986918Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.53969505s EvaluationString:}]" duration=299.19218ms
+logger=ngalert.state.manager.persist user=423441 slug=outgoinc t=2024-05-29T13:44:14.539964855Z level=debug msg="Saving alert states" count=4 max_state_save_concurrency=1
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539817957Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539803328Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539780294Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.539861671Z caller=remote_instance_store.go:51 user=489921 slug=statuscake msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539750705Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539739312Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539722435Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539701113Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=489921 slug=statuscake t=2024-05-29T13:44:14.539751274Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=423441 slug=outgoinc instance="ClusterName=rainbow-production-cluster, ServiceName=dolly-production" t=2024-05-29T13:44:14.539777353Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539681771Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xly6fhyy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539759649Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xly6fhyy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539731268Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=423441 slug=outgoinc instance="ClusterName=rainbow-production-cluster, ServiceName=coho-production" t=2024-05-29T13:44:14.539688252Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xly6fhyy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539665888Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539603073Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539594921Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlwvfmm7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539567327Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=423441 slug=outgoinc t=2024-05-29T13:44:14.539547051Z level=debug msg="State manager processing evaluation results" resultCount=4
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539544927Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlwvfmm7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539538256Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=213445 slug=gan instance= t=2024-05-29T13:44:14.539498665Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=213445 slug=gan version=15 fingerprint=2b875743da4f7887 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.539419334Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=6.259882ms
+level=error ts=2024-05-29T13:44:14.539363508Z caller=remote_rule_evaluator.go:110 user=213445 slug=gan msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="datasource_uid=grafanacloud-prom, ref_id=alert_counter_feature_errors" t=2024-05-29T13:44:14.539352106Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xltgevua-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539311234Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=158536 slug=clearsaleantifraude t=2024-05-29T13:44:14.539315144Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xltgevua-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539287084Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=158536 slug=clearsaleantifraude version=20 fingerprint=212270badf6ad56f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.539200692Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=alert_counter_feature_errors State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.538909276s EvaluationString:}]" duration=40.352152ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlpn1f9i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539182103Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlpn1f9i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539139582Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.539011444Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlpn1f9i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539077531Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlpn1f9i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.539036491Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.538963691Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.53893523Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.53894357Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlpabf0q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.53890743Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.536769962Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlpabf0q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538878119Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.526995401Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.526997421Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xlpabf0q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538840309Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.538796849Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xl6grjm3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538692267Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xl6grjm3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538618947Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xl5dy5xe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538579386Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.538310965Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xl2c22m0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538247253Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=538355 slug=flogic instance="__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-093f6ff69975ad3f5, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-093f6ff69975ad3f5, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" t=2024-05-29T13:44:14.538238922Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.538181396Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.538108344Z caller=remote_instance_store.go:51 user=202755 slug=iwmedia msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkoviydl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538083191Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkoeii59-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.538022481Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.537954759Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkoeii59-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.53793544Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkk57uus-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537774318Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkk57uus-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537762918Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkgbb4g7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537663937Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.537580194Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.537556916Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.455843ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkgbb4g7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537539416Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkdgh674-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537457475Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkdgh674-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537419524Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkdgh674-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537394854Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkdgh674-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537356054Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkbaxhhb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537282023Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xkbaxhhb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.537241643Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.537148954Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.537107735Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.537028489Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:14.537054756Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=32.162105ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xk8tf33c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.53703168Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.537008027Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.958908ms
+level=debug ts=2024-05-29T13:44:14.537010292Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xk47pu6a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.536880089Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.536818305Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=A" t=2024-05-29T13:44:14.536640335Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjzn8yr0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.536641316Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=22398 slug=sunfolding t=2024-05-29T13:44:14.536577332Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjzn8yr0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.536554036Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=b5ca52826b011746 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.536488263Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.536122912s EvaluationString:}]" duration=22.243446ms
+logger=ngalert.state.manager user=446686 slug=coinfx instance="ApiName=cfx-withdrawal-api, Series=cfx-withdrawal-api" t=2024-05-29T13:44:14.536603513Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=446686 slug=coinfx t=2024-05-29T13:44:14.536476859Z level=debug msg="State manager processing evaluation results" resultCount=3
+logger=ngalert.scheduler user=446686 slug=coinfx version=38 fingerprint=04f555ef26e83a3b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.536351782Z level=debug msg="Alert rule evaluated" results="[{Instance:ApiName=cfx-deposit-api, Series=cfx-deposit-api State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ApiName=cfx-deposit-api, Series=cfx-deposit-api Value:0xc02cc6a460} C:{Var:C Labels:ApiName=cfx-deposit-api, Series=cfx-deposit-api Value:0xc02cc6a488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.535883713s EvaluationString:[ var='B' labels={ApiName=cfx-deposit-api, Series=cfx-deposit-api} value=0 ], [ var='C' labels={ApiName=cfx-deposit-api, Series=cfx-deposit-api} value=0 ]} {Instance:ApiName=cfx-identity-api, Series=cfx-identity-api State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ApiName=cfx-identity-api, Series=cfx-identity-api Value:0xc02cc6a8e0} C:{Var:C Labels:ApiName=cfx-identity-api, Series=cfx-identity-api Value:0xc02cc6a888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.535900442s EvaluationString:[ var='B' labels={ApiName=cfx-identity-api, Series=cfx-identity-api} value=0 ], [ var='C' labels={ApiName=cfx-identity-api, Series=cfx-identity-api} value=0 ]} {Instance:ApiName=cfx-withdrawal-api, Series=cfx-withdrawal-api State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ApiName=cfx-withdrawal-api, Series=cfx-withdrawal-api Value:0xc02cc6a990} C:{Var:C Labels:ApiName=cfx-withdrawal-api, Series=cfx-withdrawal-api Value:0xc02cc6a9a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.53590879s EvaluationString:[ var='B' labels={ApiName=cfx-withdrawal-api, Series=cfx-withdrawal-api} value=0 ], [ var='C' labels={ApiName=cfx-withdrawal-api, Series=cfx-withdrawal-api} value=0 ]}]" duration=60.583695ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjyqi9al-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.536300223Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjsw0t6w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.536185612Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.536043504Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.536113831Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.536026234Z caller=remote_instance_store.go:51 user=82292 slug=prosperatech msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjpcn0co-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.53602873Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjpcn0co-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.53599844Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=grafanacloud-logs, ref_id=Exception rate" t=2024-05-29T13:44:14.53597234Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=grafanacloud-logs, ref_id=Exception rate" t=2024-05-29T13:44:14.535964742Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=82292 slug=prosperatech instance= t=2024-05-29T13:44:14.535931553Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=335419 slug=tbauctions t=2024-05-29T13:44:14.535945496Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=335419 slug=tbauctions version=2 fingerprint=b183b9520a0e6cbf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.535875674Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=Exception rate State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.535576069s EvaluationString:}]" duration=67.368678ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjhwxktu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535879669Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjhwxktu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535869369Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjhwxktu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535838508Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjh92w0d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535788468Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjh92w0d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535722847Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjh92w0d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535669446Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.535588299Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=335419 slug=tbauctions t=2024-05-29T13:44:14.535619139Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.190552ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjgzduew-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535599026Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjgzduew-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535588846Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjgzduew-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535550675Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjgzduew-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535521025Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjfnhi4n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535448874Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.535395665Z caller=remote_instance_store.go:51 user=333193 slug=peeriq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=652809 slug=glassnode t=2024-05-29T13:44:14.535277511Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjfiwusc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535257992Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xjfiwusc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.535196632Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xje9ljcp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534961609Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=733461 slug=lattice instance="instance=localhost:7400, job=sequencer-1, layer=l2, network=garnet, type=l2_unsafe" t=2024-05-29T13:44:14.534972558Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xje9ljcp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534937909Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.534889404Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xj9jwst1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534739457Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=733461 slug=lattice t=2024-05-29T13:44:14.534768375Z level=debug msg="State manager processing evaluation results" resultCount=6
+level=debug ts=2024-05-29T13:44:14.534637921Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.534669923Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.534625699Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+level=debug ts=2024-05-29T13:44:14.534569129Z caller=remote_instance_store.go:51 user=556147 slug=bettercloudholding msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.534615045Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xj85fyrm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534646106Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.534627874Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xj85fyrm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534542695Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.534552241Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.53456858Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=163215 slug=tripadvisor version=90 fingerprint=b9ad9c254a86952b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.534501462Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.534230029s EvaluationString:}]" duration=674.090023ms
+level=debug ts=2024-05-29T13:44:14.534460587Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.534451393Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xj7dxjmh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534401153Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xj7dxjmh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534390793Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xj3mv9va-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534285902Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.5.224:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-5-224.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-1, service=prometheus-control-cluster-kubelet" t=2024-05-29T13:44:14.534382657Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.5.224:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-5-224.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-1, service=prometheus-control-cluster-kubelet" t=2024-05-29T13:44:14.534368107Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xj3mv9va-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534222562Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.534238005Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xiwdkuw8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.534113941Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.534142432Z level=debug msg="State manager processing evaluation results" resultCount=3
+level=debug ts=2024-05-29T13:44:14.534124544Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:14.534081955Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.scheduler user=183214 slug=vectorizedio version=1 fingerprint=e64f831038a805df attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.53402552Z level=debug msg="Alert rule evaluated" results="[{Instance:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.4.33:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-4-33.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-0, service=prometheus-control-cluster-kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.4.33:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-4-33.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-0, service=prometheus-control-cluster-kubelet Value:0xc07de7d678} C:{Var:C Labels:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.4.33:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-4-33.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-0, service=prometheus-control-cluster-kubelet Value:0xc07de7d758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.533570484s EvaluationString:[ var='B' labels={control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.4.33:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-4-33.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-0, service=prometheus-control-cluster-kubelet} value=0.9541911180996581 ], [ var='C' labels={control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.4.33:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-4-33.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-0, service=prometheus-control-cluster-kubelet} value=0 ]} {Instance:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.5.224:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-5-224.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-1, service=prometheus-control-cluster-kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.5.224:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-5-224.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-1, service=prometheus-control-cluster-kubelet Value:0xc07de7d930} C:{Var:C Labels:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.5.224:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-5-224.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-1, service=prometheus-control-cluster-kubelet Value:0xc07de7da10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.533584976s EvaluationString:[ var='B' labels={control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.5.224:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-5-224.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-1, service=prometheus-control-cluster-kubelet} value=0.9548271937591597 ], [ var='C' labels={control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.5.224:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-5-224.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-1, service=prometheus-control-cluster-kubelet} value=0 ]} {Instance:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.6.105:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-6-105.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-2, service=prometheus-control-cluster-kubelet State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.6.105:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-6-105.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-2, service=prometheus-control-cluster-kubelet Value:0xc07de7dbf0} C:{Var:C Labels:control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.6.105:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-6-105.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-2, service=prometheus-control-cluster-kubelet Value:0xc07de7dc98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.533592945s EvaluationString:[ var='B' labels={control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.6.105:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-6-105.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-2, service=prometheus-control-cluster-kubelet} value=0.956122679531021 ], [ var='C' labels={control_cluster=control-plane-cluster, endpoint=https-metrics, instance=10.0.6.105:10250, metrics_path=/metrics, namespace=redpanda-system, node=ip-10-0-6-105.us-east-2.compute.internal, persistentvolumeclaim=datadir-control-plane-2, service=prometheus-control-cluster-kubelet} value=0 ]}]" duration=41.808788ms
+logger=ngalert.state.manager user=27737 slug=edfmancapital t=2024-05-29T13:44:14.534029616Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=27737 slug=edfmancapital version=3 fingerprint=796ba340f0f065ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.533951591Z level=debug msg="Alert rule evaluated" results="[{Instance:ConnectionId=dxcon-ffmxpihr State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ConnectionId=dxcon-ffmxpihr Value:0xc025de0fe0} C:{Var:C Labels:ConnectionId=dxcon-ffmxpihr Value:0xc025de0fe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.533550456s EvaluationString:[ var='B' labels={ConnectionId=dxcon-ffmxpihr} value=1 ], [ var='C' labels={ConnectionId=dxcon-ffmxpihr} value=0 ]}]" duration=41.718801ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xiwdkuw8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.533981339Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xiwdkuw8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.533965569Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xivne8kl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.533897078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xivne8kl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.533801327Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xivne8kl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.533716426Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.533749538Z caller=remote_instance_store.go:51 user=856040 slug=kuady msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:14.533693617Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:14.533672276Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=856040 slug=kuady t=2024-05-29T13:44:14.533596535Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xiufmj6e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.533541795Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal" t=2024-05-29T13:44:14.533518376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal" t=2024-05-29T13:44:14.533496725Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.533358455Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=890273 slug=cmhusqnp t=2024-05-29T13:44:14.533335102Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.533404798Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.533348694Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=890273 slug=cmhusqnp version=1 fingerprint=8701bb187df9eab3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.53326477Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal Value:0xc039cb13a8} B:{Var:B Labels:instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal Value:0xc039cb1388} C:{Var:C Labels:instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal Value:0xc039cb1398}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532762296s EvaluationString:[ var='A' labels={instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal} value=63.56384634714903 ], [ var='B' labels={instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal} value=63.56384634714903 ], [ var='C' labels={instance=dueuwe4bhusqwcadbs2001.husq.gcp.hclsw.internal} value=0 ]}]" duration=13.861995ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xitaecvg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.533356463Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=26909 slug=designcrowd instance= previous_handler=resultNoData t=2024-05-29T13:44:14.533368969Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.533284871Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=26909 slug=designcrowd instance= previous_handler=resultNoData t=2024-05-29T13:44:14.533352909Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=26909 slug=designcrowd instance= t=2024-05-29T13:44:14.53333779Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=26909 slug=designcrowd t=2024-05-29T13:44:14.533312916Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=26909 slug=designcrowd version=3 fingerprint=5616d715a631b437 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.533182121Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[A0:{Var:A Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.532879622s EvaluationString:[ var='A0' metric='NoData' labels={} value=null ]}]" duration=46.939683ms + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.533220472Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xiouvag2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.533194671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ximig9pp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.53312728Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.53316025Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.532998222Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xik0pukc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.532922108Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=691102 slug=deluxeconfdev t=2024-05-29T13:44:14.532744671Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.532887193Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.53273024Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xik0pukc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.532831287Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:14.532797029Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.532719311Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.53269486Z caller=remote_instance_store.go:51 user=866972 slug=mitsubishi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.53266614Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.532655719Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xik0pukc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.532769047Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.532616779Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.532598992Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xidvxu98-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.532582235Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=444728 slug=stgnextgen t=2024-05-29T13:44:14.532562521Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.748459ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xicoxlok-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.532548274Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.532423037Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=373502 slug=stakeandrelax version=6 fingerprint=04758c3680e4a590 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.532181575Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.531766213s EvaluationString:}]" duration=291.314427ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xi5ho5on-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.532297102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250971 slug=disruptivedigital t=2024-05-29T13:44:14.532041256Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250971 slug=disruptivedigital instance="datasource_uid=LXw3EdInk, ref_id=A" t=2024-05-29T13:44:14.532024873Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xi1lh1u7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.53206872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xi18d18g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531927208Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhzrcye1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531861337Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.531800851Z caller=remote_instance_store.go:51 user=635771 slug=sharedservices msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhwr0ub5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531679476Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.531513242Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhw4aoyh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531577585Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva t=2024-05-29T13:44:14.531435231Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.531366594Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.531469252Z caller=remote_instance_store.go:51 user=257565 slug=eddyson msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhw4aoyh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.531490634Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xht8jr8t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531344572Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.531337511Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.808191ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xht8jr8t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531276841Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xht0y7h2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531245821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xht0y7h2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531220361Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.531141423Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.632259ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhp0qd0y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.531010189Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhp0qd0y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530934468Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhj92lbz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530863337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhj92lbz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530797217Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhj92lbz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530736256Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=83647 slug=bidsolutions t=2024-05-29T13:44:14.530646248Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhgzk82s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530565484Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhgzk82s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530553004Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xhgzk82s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530520664Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xh7jbxo1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530458603Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.530531723Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xgqvjswe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530079079Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xgqvjswe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530043439Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xgqvjswe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.530019759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xgqvjswe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529954258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xggdxj44-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529877387Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xggdxj44-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529809246Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-xggdxj44-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529774186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xg3bgg1d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529456463Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xg3bgg1d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529432523Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.529582567Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.529578571Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfvmo8bl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529333892Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfvmo8bl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529318781Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfvmo8bl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529254691Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xftr73uh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.52914976Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xftr73uh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529091969Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xftr73uh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.529055579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xft8dzdi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528983738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xft8dzdi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528908907Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xft8dzdi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528829236Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.52936092Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfnvni24-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528602314Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-xfnvni24-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528501813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfh5josi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528327131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfh5josi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528281731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfh5josi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528269311Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfh5josi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.52820877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfbtd9xg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528064449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xfbtd9xg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.528003408Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-xf6sl76a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527811526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xf6sl76a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527762125Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xexysyce-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527611364Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xexysyce-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527575673Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xexysyce-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527561153Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xexy44c9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527407082Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xexy44c9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527391642Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-xexy44c9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527335641Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xet0cuve-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527130449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xet0cuve-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527078008Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xesrk7pz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.527024388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xesrk7pz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.526908917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=139570 slug=pentesttools instance="device=/dev/sda, fstype=ext4, instance=cve-search-16487214.pentest-tools.com:59100, job=node_exporter_local_https, mountpoint=/" t=2024-05-29T13:44:14.528753813Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xeqt6286-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.526426952Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=139570 slug=pentesttools instance="device=/dev/sda, fstype=ext4, instance=cve-search-16487214.pentest-tools.com:59100, job=node_exporter_local_https, mountpoint=/" t=2024-05-29T13:44:14.528741406Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xeqt6286-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.526339461Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=139570 slug=pentesttools t=2024-05-29T13:44:14.528707879Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.528579836Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=147497 slug=rhodev t=2024-05-29T13:44:14.527557756Z level=debug msg="Saving alert states" count=111 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=147497 slug=rhodev instance= t=2024-05-29T13:44:14.52753294Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=147497 slug=rhodev instance= t=2024-05-29T13:44:14.527512057Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=147497 slug=rhodev instance= t=2024-05-29T13:44:14.52750642Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ logger=ngalert.state.manager user=147497 slug=rhodev instance= t=2024-05-29T13:44:14.527497809Z level=debug msg="Setting next state" handler=resultError
+ [... several hundred further lines of the same sample log data omitted, one logfmt entry per diff line: ngalert.state.manager "Setting next state" / "Keeping state" / "Execution error state is Normal" transitions, ngalert.state.manager.persist "Saving alert states" entries, remote_instance_store.go:51 "calling SaveAlertInstance" calls, ngalert.scheduler "Alert rule evaluated" results, and remote_rule_evaluator.go:110 "failed to build query 'A': data source not found" errors, across tenant slugs including rhodev, lepton, bettercloudprod, teckresources, funrise, and others ...]
+ logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:14.520582947Z level=debug msg="Saving alert states done" count=1
max_state_save_concurrency=1 duration=67.871732ms + logger=ngalert.scheduler user=224047 slug=ppbtradingtribeprd version=1 fingerprint=df95ae3579771b64 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.520514031Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.520239658s EvaluationString:}]" duration=12.104094ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xbaxy0sr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.52043165Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.520402533Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xbaxy0sr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.520391Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xb7jplf4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.520292869Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xb7jplf4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.520239488Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xb734o4x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.520140827Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.520066421Z caller=remote_instance_store.go:51 user=456850 slug=juniz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-xau2d60y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519804823Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.519689781Z caller=remote_image_capturer.go:33 user=444728 slug=stgnextgen rule_org_id=1 rule_uid=edm98xb5ovsw0c msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xarzb10v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519723093Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xarzb10v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519662722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=444728 slug=stgnextgen instance="__name__=probe_success, config_version=1716189990996718080, instance=https://promotion-datatools-migration.delightfulsand-1c79d849.australiaeast.azurecontainerapps.io, job=promotion, probe=stg_private_probe" t=2024-05-29T13:44:14.519627955Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=444728 slug=stgnextgen t=2024-05-29T13:44:14.519572483Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xaityb5w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519546771Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.519528292Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.519485162Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xaityb5w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51948485Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xaityb5w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519419419Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=169420 slug=newspring version=102 fingerprint=e90abb08df8e8e20 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.519072092Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=10.24.60.201, job=APC UPS, key=MYR, sysName=Unknown State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=10.24.60.201, job=APC UPS, key=MYR, sysName=Unknown Value:0xc014060a30} B:{Var:B Labels:instance=10.24.60.201, job=APC UPS, key=MYR, sysName=Unknown Value:0xc014060950} MYR:{Var:MYR Labels:instance=10.24.60.201, job=APC UPS, key=MYR, sysName=Unknown Value:0xc0140609e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.518657306s EvaluationString:[ var='A' labels={instance=10.24.60.201, job=APC UPS, key=MYR, sysName=Unknown} value=211 ], [ var='B' labels={instance=10.24.60.201, job=APC UPS, key=MYR, sysName=Unknown} value=0 ], [ var='MYR' labels={instance=10.24.60.201, job=APC UPS, key=MYR, sysName=Unknown} value=211 ]}]" duration=11.136312ms + level=debug ts=2024-05-29T13:44:14.51923865Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xah0s7ls-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519246638Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xagfncjf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519160347Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xagfncjf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519148447Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xagfncjf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519091666Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xaapvnfv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.519057466Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xaapvnfv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.518949295Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xa758082-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.518870244Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.518772668Z caller=remote_instance_store.go:51 user=813270 slug=adiante msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.51859128Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=130276 slug=devops8 instance="instance=www8-web-2, job=hyperv-hosts" t=2024-05-29T13:44:14.518677369Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.518475814Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.518440756Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=314067 slug=itsme instance= t=2024-05-29T13:44:14.518409067Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=130276 slug=devops8 instance="instance=www6-web-2, job=hyperv-hosts" t=2024-05-29T13:44:14.518416417Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xa3f5y7w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.518303418Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xa3f5y7w-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.518286208Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xa3f5y7w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.518226217Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-xa30hsjr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.518128316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=130276 slug=devops8 instance="instance=www5-web-1, job=hyperv-hosts" t=2024-05-29T13:44:14.518218457Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=130276 slug=devops8 instance="instance=www4-web-1, job=hyperv-hosts" t=2024-05-29T13:44:14.518084051Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=130276 slug=devops8 instance="instance=www3-web-2, job=hyperv-hosts" t=2024-05-29T13:44:14.518017927Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.517943592Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:14.517843647Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=130276 slug=devops8 instance="instance=www2-web-2, job=hyperv-hosts" t=2024-05-29T13:44:14.517871803Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9p5mr8f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517821743Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.517808173Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix instance="app=query-head" t=2024-05-29T13:44:14.517794252Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="app=query-head" t=2024-05-29T13:44:14.517778947Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.517675766Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=716630 slug=coapdev instance="__name__=windows_service_status, agent_hostname=cap-prd-app, 
instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok" t=2024-05-29T13:44:14.517697704Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:14.517730878Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager user=716630 slug=coapdev instance="__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok" t=2024-05-29T13:44:14.517670454Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.517708375Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=206107 slug=hydrolix version=6 fingerprint=2105755b02528526 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.517611552Z level=debug msg="Alert rule evaluated" results="[{Instance:app=query-head State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:app=query-head Value:0xc01ce620d0} B:{Var:B Labels:app=query-head Value:0xc01ce62500} query_head:{Var:query_head Labels:app=query-head Value:0xc01ce620b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.517197295s EvaluationString:[ var='A' labels={app=query-head} value=2 ], [ var='B' labels={app=query-head} value=2 ], [ var='query_head' labels={app=query-head} value=0 ]} {Instance:app=query-peer State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:app=query-peer Value:0xc01ce62530} B:{Var:B Labels:app=query-peer Value:0xc01ce62550} query_head:{Var:query_head Labels:app=query-peer Value:0xc01ce62590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.517211305s EvaluationString:[ var='A' labels={app=query-peer} value=3 ], [ var='B' labels={app=query-peer} value=3 ], [ var='query_head' labels={app=query-peer} value=0 ]}]" duration=259.937305ms + level=debug ts=2024-05-29T13:44:14.517662245Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=716630 slug=coapdev instance="__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok" t=2024-05-29T13:44:14.517607833Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.517660404Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9n49vsa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517673812Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=130276 slug=devops8 instance="instance=www10-web-1, job=hyperv-hosts" t=2024-05-29T13:44:14.517644719Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-x9n49vsa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517581241Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=716630 slug=coapdev version=1 fingerprint=821ee06c58d3e9d0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.517424779Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc002371d40} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc002371d90} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc002371de0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516910258s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=0 ]} {Instance:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc002371e70} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc002371eb8} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc002371f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516941749s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=0 ]} {Instance:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc002371fd0} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc006b92030} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok Value:0xc006b92088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.51696353s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=enableservertomcat, status=ok} value=0 ]}]" duration=8.578026ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9n49vsa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51757009Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=130276 slug=devops8 t=2024-05-29T13:44:14.517456414Z level=debug msg="State manager processing evaluation results" resultCount=18 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9i22dm5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517450809Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=130276 slug=devops8 version=5 fingerprint=6e1f6344582fd6af attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.51723846Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=wuxi-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=wuxi-web-1, job=hyperv-hosts Value:0xc01dee8ad0} C:{Var:C Labels:instance=wuxi-web-1, job=hyperv-hosts Value:0xc01dee8b00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516643847s EvaluationString:[ var='B' labels={instance=wuxi-web-1, job=hyperv-hosts} value=70.43437380135754 ], [ var='C' labels={instance=wuxi-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www10-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www10-web-1, job=hyperv-hosts Value:0xc01dee8b70} C:{Var:C Labels:instance=www10-web-1, job=hyperv-hosts Value:0xc01dee8b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516655364s EvaluationString:[ var='B' labels={instance=www10-web-1, job=hyperv-hosts} value=24.661398048176423 ], [ var='C' 
labels={instance=www10-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www10-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www10-web-2, job=hyperv-hosts Value:0xc01dee8c10} C:{Var:C Labels:instance=www10-web-2, job=hyperv-hosts Value:0xc01dee8c40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516658997s EvaluationString:[ var='B' labels={instance=www10-web-2, job=hyperv-hosts} value=54.68273801340031 ], [ var='C' labels={instance=www10-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www2-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www2-web-1, job=hyperv-hosts Value:0xc01dee8ca0} C:{Var:C Labels:instance=www2-web-1, job=hyperv-hosts Value:0xc01dee8cd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516663851s EvaluationString:[ var='B' labels={instance=www2-web-1, job=hyperv-hosts} value=36.764386881002984 ], [ var='C' labels={instance=www2-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www2-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www2-web-2, job=hyperv-hosts Value:0xc01dee8d40} C:{Var:C Labels:instance=www2-web-2, job=hyperv-hosts Value:0xc01dee8d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516666729s EvaluationString:[ var='B' labels={instance=www2-web-2, job=hyperv-hosts} value=51.57540397829372 ], [ var='C' labels={instance=www2-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www3-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www3-web-1, job=hyperv-hosts Value:0xc01dee8de0} C:{Var:C Labels:instance=www3-web-1, job=hyperv-hosts Value:0xc01dee8e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.51667176s EvaluationString:[ var='B' labels={instance=www3-web-1, job=hyperv-hosts} value=58.33948542815691 ], [ var='C' labels={instance=www3-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www3-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www3-web-2, job=hyperv-hosts Value:0xc01dee8ec0} C:{Var:C Labels:instance=www3-web-2, job=hyperv-hosts Value:0xc01dee8e80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516674529s EvaluationString:[ var='B' labels={instance=www3-web-2, job=hyperv-hosts} value=18.454396253908914 ], [ var='C' labels={instance=www3-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www4-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www4-web-1, job=hyperv-hosts Value:0xc01dee8f20} C:{Var:C Labels:instance=www4-web-1, job=hyperv-hosts Value:0xc01dee8f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516677333s EvaluationString:[ var='B' labels={instance=www4-web-1, job=hyperv-hosts} value=30.165841091776585 ], [ var='C' labels={instance=www4-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www4-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www4-web-2, job=hyperv-hosts Value:0xc01dee8fc8} C:{Var:C Labels:instance=www4-web-2, job=hyperv-hosts Value:0xc01dee9008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516680252s EvaluationString:[ var='B' labels={instance=www4-web-2, job=hyperv-hosts} value=13.958467682690026 ], [ var='C' labels={instance=www4-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www5-web-1, job=hyperv-hosts State:Normal Error: Results:map[] 
Values:map[B:{Var:B Labels:instance=www5-web-1, job=hyperv-hosts Value:0xc01dee90e0} C:{Var:C Labels:instance=www5-web-1, job=hyperv-hosts Value:0xc01dee9098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516683817s EvaluationString:[ var='B' labels={instance=www5-web-1, job=hyperv-hosts} value=24.380429272764797 ], [ var='C' labels={instance=www5-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www5-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www5-web-2, job=hyperv-hosts Value:0xc01dee9178} C:{Var:C Labels:instance=www5-web-2, job=hyperv-hosts Value:0xc01dee9150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516687088s EvaluationString:[ var='B' labels={instance=www5-web-2, job=hyperv-hosts} value=17.539273914891982 ], [ var='C' labels={instance=www5-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www6-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www6-web-1, job=hyperv-hosts Value:0xc01dee91d8} C:{Var:C Labels:instance=www6-web-1, job=hyperv-hosts Value:0xc01dee9208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516689688s EvaluationString:[ var='B' labels={instance=www6-web-1, job=hyperv-hosts} value=51.77390475511786 ], [ var='C' labels={instance=www6-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www6-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www6-web-2, job=hyperv-hosts Value:0xc01dee9278} C:{Var:C Labels:instance=www6-web-2, job=hyperv-hosts Value:0xc01dee92b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516692214s EvaluationString:[ var='B' labels={instance=www6-web-2, job=hyperv-hosts} value=12.821680539356672 ], [ var='C' labels={instance=www6-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www7-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www7-web-1, job=hyperv-hosts Value:0xc01dee9340} C:{Var:C Labels:instance=www7-web-1, job=hyperv-hosts Value:0xc01dee9380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516694804s EvaluationString:[ var='B' labels={instance=www7-web-1, job=hyperv-hosts} value=32.33824730098331 ], [ var='C' labels={instance=www7-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www7-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www7-web-2, job=hyperv-hosts Value:0xc01dee9400} C:{Var:C Labels:instance=www7-web-2, job=hyperv-hosts Value:0xc01dee9428}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516697793s EvaluationString:[ var='B' labels={instance=www7-web-2, job=hyperv-hosts} value=56.04935852413472 ], [ var='C' labels={instance=www7-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www8-web-1, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www8-web-1, job=hyperv-hosts Value:0xc01dee94b0} C:{Var:C Labels:instance=www8-web-1, job=hyperv-hosts Value:0xc01dee94f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516699964s EvaluationString:[ var='B' labels={instance=www8-web-1, job=hyperv-hosts} value=37.98650691829879 ], [ var='C' labels={instance=www8-web-1, job=hyperv-hosts} value=0 ]} {Instance:instance=www8-web-2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www8-web-2, job=hyperv-hosts Value:0xc01dee9560} C:{Var:C Labels:instance=www8-web-2, job=hyperv-hosts 
Value:0xc01dee9590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516703633s EvaluationString:[ var='B' labels={instance=www8-web-2, job=hyperv-hosts} value=58.658386013289864 ], [ var='C' labels={instance=www8-web-2, job=hyperv-hosts} value=0 ]} {Instance:instance=www9-web2, job=hyperv-hosts State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=www9-web2, job=hyperv-hosts Value:0xc01dee9610} C:{Var:C Labels:instance=www9-web2, job=hyperv-hosts Value:0xc01dee9660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516707096s EvaluationString:[ var='B' labels={instance=www9-web2, job=hyperv-hosts} value=36.919514289423 ], [ var='C' labels={instance=www9-web2, job=hyperv-hosts} value=0 ]}]" duration=53.269569ms + level=debug ts=2024-05-29T13:44:14.517474146Z caller=remote_instance_store.go:51 user=335419 slug=tbauctions msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.517443389Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9gan8vd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517385409Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=335419 slug=tbauctions t=2024-05-29T13:44:14.517361169Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=CPU Usage is too high" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9gan8vd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517299768Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9gan8vd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517277197Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x9ejldpk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517245527Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.517080905Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x929wusf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.517035625Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.517153029Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.517136153Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.517126159Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8z7gpz4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516959734Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=624354 slug=truliooworkflow t=2024-05-29T13:44:14.516884605Z level=debug msg="Saving alert states done" count=13 max_state_save_concurrency=1 duration=184.017828ms + logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:14.516909866Z level=debug msg="Saving alert states" count=12 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8w8l4f4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516839763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8w8l4f4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516829673Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.516799517Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8w8l4f4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516778852Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.516715786Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8w8l4f4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516738122Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=635771 slug=sharedservices instance="cluster=edge-use2-eks-prd, phase=Unknown" t=2024-05-29T13:44:14.516701184Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8sthqkd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516655221Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.51657443Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.516593395Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.516617369Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8sthqkd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51658467Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=a0d6d4c5bb49fe23 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.516538058Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516343772s EvaluationString:}]" duration=202.901326ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8qgu6oo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51655619Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.516537716Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8qgu6oo-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516501119Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.516505Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=635771 slug=sharedservices instance="cluster=edge-use2-eks-prd, phase=Pending" t=2024-05-29T13:44:14.516542922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8qgu6oo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516481099Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.51646004Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=635771 slug=sharedservices instance="cluster=edge-use2-eks-prd, phase=Failed" t=2024-05-29T13:44:14.516463221Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.516443617Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8qgu6oo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516429209Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=59fdf437440b2c7b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.516358298Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.516127658s EvaluationString:}]" duration=166.630257ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8ljer2k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516388448Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8ljer2k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516326068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=635771 slug=sharedservices 
instance="cluster=edge-use1-eks-prd, phase=Pending" t=2024-05-29T13:44:14.516316859Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=635771 slug=sharedservices instance="cluster=edge-use1-eks-prd, phase=Pending" t=2024-05-29T13:44:14.516307059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8ljer2k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516297157Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8kz8wja-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516258367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.516215338Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8kz8wja-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516199706Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8jxyarw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516107065Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8jxyarw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516083845Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8jxyarw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.516017165Z level=debug msg="Keeping state" state=Normal 
+ logger=ngalert.state.manager user=635771 slug=sharedservices instance="cluster=edge-euc1-eks-prd, phase=Failed" t=2024-05-29T13:44:14.516015526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8grvp23-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515963264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x8grvp23-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515877573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=635771 slug=sharedservices version=3 fingerprint=66599f40f48ad3fe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.515749433Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=edge-euc1-eks-prd, phase=Failed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-euc1-eks-prd, phase=Failed Value:0xc0299817f0} B:{Var:B Labels:cluster=edge-euc1-eks-prd, phase=Failed Value:0xc029981810} C:{Var:C Labels:cluster=edge-euc1-eks-prd, phase=Failed Value:0xc029981830}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514908472s EvaluationString:[ var='A' labels={cluster=edge-euc1-eks-prd, phase=Failed} value=0 ], [ var='B' labels={cluster=edge-euc1-eks-prd, phase=Failed} value=0 ], [ var='C' labels={cluster=edge-euc1-eks-prd, phase=Failed} value=0 ]} {Instance:cluster=edge-euc1-eks-prd, phase=Pending State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-euc1-eks-prd, phase=Pending Value:0xc029981870} B:{Var:B Labels:cluster=edge-euc1-eks-prd, phase=Pending Value:0xc029981890} C:{Var:C Labels:cluster=edge-euc1-eks-prd, phase=Pending Value:0xc0299818b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514929362s EvaluationString:[ var='A' labels={cluster=edge-euc1-eks-prd, phase=Pending} value=0 ], [ var='B' labels={cluster=edge-euc1-eks-prd, phase=Pending} value=0 ], [ var='C' labels={cluster=edge-euc1-eks-prd, phase=Pending} value=0 ]} {Instance:cluster=edge-euc1-eks-prd, phase=Unknown State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-euc1-eks-prd, phase=Unknown Value:0xc0299818f0} B:{Var:B Labels:cluster=edge-euc1-eks-prd, phase=Unknown Value:0xc029981910} C:{Var:C Labels:cluster=edge-euc1-eks-prd, phase=Unknown Value:0xc029981930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514939762s EvaluationString:[ var='A' labels={cluster=edge-euc1-eks-prd, phase=Unknown} value=0 ], [ var='B' labels={cluster=edge-euc1-eks-prd, phase=Unknown} value=0 ], [ var='C' labels={cluster=edge-euc1-eks-prd, phase=Unknown} value=0 ]} {Instance:cluster=edge-use1-eks-prd, phase=Failed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-use1-eks-prd, phase=Failed Value:0xc029981970} B:{Var:B Labels:cluster=edge-use1-eks-prd, phase=Failed Value:0xc029981990} 
C:{Var:C Labels:cluster=edge-use1-eks-prd, phase=Failed Value:0xc0299819b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514950922s EvaluationString:[ var='A' labels={cluster=edge-use1-eks-prd, phase=Failed} value=0 ], [ var='B' labels={cluster=edge-use1-eks-prd, phase=Failed} value=0 ], [ var='C' labels={cluster=edge-use1-eks-prd, phase=Failed} value=0 ]} {Instance:cluster=edge-use1-eks-prd, phase=Pending State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-use1-eks-prd, phase=Pending Value:0xc0299819f0} B:{Var:B Labels:cluster=edge-use1-eks-prd, phase=Pending Value:0xc029981a20} C:{Var:C Labels:cluster=edge-use1-eks-prd, phase=Pending Value:0xc029981a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514960373s EvaluationString:[ var='A' labels={cluster=edge-use1-eks-prd, phase=Pending} value=0 ], [ var='B' labels={cluster=edge-use1-eks-prd, phase=Pending} value=0 ], [ var='C' labels={cluster=edge-use1-eks-prd, phase=Pending} value=0 ]} {Instance:cluster=edge-use1-eks-prd, phase=Unknown State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-use1-eks-prd, phase=Unknown Value:0xc029981ac0} B:{Var:B Labels:cluster=edge-use1-eks-prd, phase=Unknown Value:0xc029981a80} C:{Var:C Labels:cluster=edge-use1-eks-prd, phase=Unknown Value:0xc029981aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514972333s EvaluationString:[ var='A' labels={cluster=edge-use1-eks-prd, phase=Unknown} value=0 ], [ var='B' labels={cluster=edge-use1-eks-prd, phase=Unknown} value=0 ], [ var='C' labels={cluster=edge-use1-eks-prd, phase=Unknown} value=0 ]} {Instance:cluster=edge-use2-eks-prd, phase=Failed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-use2-eks-prd, phase=Failed Value:0xc029981b00} B:{Var:B Labels:cluster=edge-use2-eks-prd, phase=Failed Value:0xc029981b20} C:{Var:C Labels:cluster=edge-use2-eks-prd, phase=Failed Value:0xc029981b40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514982473s EvaluationString:[ var='A' labels={cluster=edge-use2-eks-prd, phase=Failed} value=0 ], [ var='B' labels={cluster=edge-use2-eks-prd, phase=Failed} value=0 ], [ var='C' labels={cluster=edge-use2-eks-prd, phase=Failed} value=0 ]} {Instance:cluster=edge-use2-eks-prd, phase=Pending State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-use2-eks-prd, phase=Pending Value:0xc029981b80} B:{Var:B Labels:cluster=edge-use2-eks-prd, phase=Pending Value:0xc029981ba0} C:{Var:C Labels:cluster=edge-use2-eks-prd, phase=Pending Value:0xc029981bc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514991993s EvaluationString:[ var='A' labels={cluster=edge-use2-eks-prd, phase=Pending} value=0 ], [ var='B' labels={cluster=edge-use2-eks-prd, phase=Pending} value=0 ], [ var='C' labels={cluster=edge-use2-eks-prd, phase=Pending} value=0 ]} {Instance:cluster=edge-use2-eks-prd, phase=Unknown State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=edge-use2-eks-prd, phase=Unknown Value:0xc029981c50} B:{Var:B Labels:cluster=edge-use2-eks-prd, phase=Unknown Value:0xc029981c10} C:{Var:C Labels:cluster=edge-use2-eks-prd, phase=Unknown Value:0xc029981c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.515000993s EvaluationString:[ var='A' labels={cluster=edge-use2-eks-prd, phase=Unknown} value=0 ], [ var='B' labels={cluster=edge-use2-eks-prd, phase=Unknown} value=0 ], [ var='C' labels={cluster=edge-use2-eks-prd, phase=Unknown} value=0 ]} 
{Instance:cluster=eng-eks-prd, phase=Failed State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=eng-eks-prd, phase=Failed Value:0xc029981cd8} B:{Var:B Labels:cluster=eng-eks-prd, phase=Failed Value:0xc029981cf8} C:{Var:C Labels:cluster=eng-eks-prd, phase=Failed Value:0xc029981c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.515010103s EvaluationString:[ var='A' labels={cluster=eng-eks-prd, phase=Failed} value=0 ], [ var='B' labels={cluster=eng-eks-prd, phase=Failed} value=0 ], [ var='C' labels={cluster=eng-eks-prd, phase=Failed} value=0 ]} {Instance:cluster=eng-eks-prd, phase=Pending State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=eng-eks-prd, phase=Pending Value:0xc029981d68} B:{Var:B Labels:cluster=eng-eks-prd, phase=Pending Value:0xc029981d98} C:{Var:C Labels:cluster=eng-eks-prd, phase=Pending Value:0xc029981dc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.515020543s EvaluationString:[ var='A' labels={cluster=eng-eks-prd, phase=Pending} value=0 ], [ var='B' labels={cluster=eng-eks-prd, phase=Pending} value=0 ], [ var='C' labels={cluster=eng-eks-prd, phase=Pending} value=0 ]} {Instance:cluster=eng-eks-prd, phase=Unknown State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=eng-eks-prd, phase=Unknown Value:0xc029981e28} B:{Var:B Labels:cluster=eng-eks-prd, phase=Unknown Value:0xc029981e48} C:{Var:C Labels:cluster=eng-eks-prd, phase=Unknown Value:0xc029981e98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.515029433s EvaluationString:[ var='A' labels={cluster=eng-eks-prd, phase=Unknown} value=0 ], [ var='B' labels={cluster=eng-eks-prd, phase=Unknown} value=0 ], [ var='C' labels={cluster=eng-eks-prd, phase=Unknown} value=0 ]}]" duration=34.638086ms + level=debug ts=2024-05-29T13:44:14.515842479Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:14.515733827Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.027377ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x886kymc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515679551Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.51557355Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x87mfdgb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51558627Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.515394581Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.515430536Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x84k4dot-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515444349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x84k4dot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515413388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x84k4dot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515390698Z level=debug msg="Setting next state" handler=resultNormal + level=debug component=discovery ts=2024-05-29T13:44:14.515383781Z caller=retry.go:58 user=529753 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=2 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x83wgml9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515349418Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.515356441Z caller=remote_alert_sender.go:94 user=127813 slug=clearsale host=clearsale-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.253.244:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bddallrbad1q8d alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x83wgml9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515318767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=35223 slug=silkroad t=2024-05-29T13:44:14.515272034Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.142507ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7ylhz2j-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515228186Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.515109615Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7y53263-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515071015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7y53263-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.515048485Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.514902196Z caller=remote_instance_store.go:51 user=765874 slug=rhwstaging msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.514894213Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=765874 slug=rhwstaging t=2024-05-29T13:44:14.514829355Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=765874 slug=rhwstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.514773874Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7q3xf9c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514806032Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7q3xf9c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514727451Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="ClusterName=prod, ServiceName=postgres-processor-live" t=2024-05-29T13:44:14.514853035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7koseu2-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514698581Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.514414055Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.442793ms + logger=ngalert.scheduler user=432323 slug=lithic version=3 fingerprint=5121a281a702927d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.514491856Z level=debug msg="Alert rule evaluated" results="[{Instance:ClusterName=prod, ServiceName=postgres-processor-live State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ClusterName=prod, ServiceName=postgres-processor-live Value:0xc018305670} C:{Var:C Labels:ClusterName=prod, ServiceName=postgres-processor-live Value:0xc0183056a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.514182184s EvaluationString:[ var='B' labels={ClusterName=prod, ServiceName=postgres-processor-live} value=20.849609375 ], [ var='C' labels={ClusterName=prod, ServiceName=postgres-processor-live} value=0 ]}]" duration=55.982059ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7koseu2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51460579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=122048 slug=skoobe instance= t=2024-05-29T13:44:14.51462759Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.51451613Z caller=remote_instance_store.go:51 user=257565 slug=eddyson msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7g8rlq0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514455639Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7g8rlq0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514443498Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.514274237Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.508711073Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.51425335Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.50852577Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling 
SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.507781206Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7bzfvkc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514207806Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x7bzfvkc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514183696Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x77aik63-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514098445Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x77aik63-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514087175Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x77aik63-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.514042774Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.514129015Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.514052741Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.514055429Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x77aik63-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.513969594Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.513928457Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.513953206Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x754n7cm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513856442Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x754n7cm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513802112Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x70nobm2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51358068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6v2du6h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513500349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6v2du6h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513490689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6v2du6h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513461378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6v2du6h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513440768Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.513455341Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6v2du6h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513411638Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6v1lfb3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513350697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=307381 slug=kambitaskforce version=10 fingerprint=ad9bda3486735287 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.513247489Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.512935933s EvaluationString:}]" duration=44.295081ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6v1lfb3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513299327Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6u19pvh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513215406Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.513044014Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6u19pvh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513124175Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6u19pvh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513102475Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6qq8psk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.513003814Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6qq8psk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512983533Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6qq8psk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512954973Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6nkmf9v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512813602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x6mju3rl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512701341Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.512706765Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x68388as-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512567609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x68388as-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512512909Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x68388as-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512492108Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.512565595Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.512499065Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:14.512380852Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698963 slug=lemonade instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:14.512372738Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.512393472Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.512349271Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5y91cdw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512293456Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5y91cdw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512252586Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance= t=2024-05-29T13:44:14.512148247Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5xb7mbh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.512072694Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5xb7mbh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511978043Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.511931301Z caller=remote_instance_store.go:51 user=63636 slug=streamelements msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=63636 slug=streamelements instance= t=2024-05-29T13:44:14.511879483Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5l050hp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511756891Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5l050hp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511727281Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.511731406Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=63636 slug=streamelements version=4 fingerprint=d4e987c79e165b3f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.511788258Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.511520523s EvaluationString:}]" duration=71.685839ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5l050hp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51167578Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-x5i6xnq6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.5116426Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.511762285Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.511719158Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.511680949Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.511640803Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=94702302c6ef6b72 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.511552765Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.511291399s EvaluationString:}]" duration=298.838969ms + level=debug ts=2024-05-29T13:44:14.511602467Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_max_max" t=2024-05-29T13:44:14.511576429Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=877555 slug=cmbe t=2024-05-29T13:44:14.511506438Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=877555 slug=cmbe instance= t=2024-05-29T13:44:14.511194435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:14.511483903Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=472647 slug=planet version=3 fingerprint=0d88a527321c6d8e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.511395792Z level=debug msg="Alert rule evaluated" results="[{Instance:metric.name=value_num_undelivered_messages_max_max State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_num_undelivered_messages_max_max Value:0xc019f2e858} C:{Var:C Labels:metric.name=value_num_undelivered_messages_max_max Value:0xc019f2e880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.51101441s EvaluationString:[ var='B' labels={metric.name=value_num_undelivered_messages_max_max} value=0 ], [ var='C' labels={metric.name=value_num_undelivered_messages_max_max} value=0 ]}]" duration=51.586364ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5fjgwd4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511377737Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5fjgwd4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511286546Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.511196289Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5cmz6gr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511166665Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=877555 slug=cmbe t=2024-05-29T13:44:14.511157455Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5cmz6gr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511096764Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5cmz6gr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.511021643Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5afdrz9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.510968133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5afdrz9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.510936662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=770248 slug=aurora instance="id=1248397, name=aurora-prom" t=2024-05-29T13:44:14.510541177Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x5afdrz9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51073043Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x58mw50i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.51069065Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.510630748Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x570tco3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.510545938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x570tco3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.510456258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x564hcv7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.510351586Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4qwxzal-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.510134934Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=redpanda-post-upgrade, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-post-upgrade-pnp5c, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aaa37243-ab5c-4c9b-bd25-6bf61707fd1a" t=2024-05-29T13:44:14.510234334Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4qwxzal-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.510124784Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=redpanda-post-upgrade, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-post-upgrade-pnp5c, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aaa37243-ab5c-4c9b-bd25-6bf61707fd1a" t=2024-05-29T13:44:14.51022059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.510190884Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4qwxzal-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509995883Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4jc5h5t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509950432Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.509897989Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4jc5h5t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509845111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.509882615Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-2, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=06bfff69-32a5-4415-a2ef-2dd4c70a49e4" 
t=2024-05-29T13:44:14.509872007Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4jc5h5t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509791051Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=bc2869416ba9d12c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.509710556Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.509323783s EvaluationString:}]" duration=201.908348ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4dyo0it-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50972801Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.509789813Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x4dyo0it-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50968995Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x48njh3y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509531818Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x48njh3y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509471017Z level=debug msg="Setting next state" handler=resultNormal + 
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=prometheus, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3" t=2024-05-29T13:44:14.509418542Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=prometheus, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3" t=2024-05-29T13:44:14.509408297Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.509379665Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3r3x3iz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509207975Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.50922103Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3r3x3iz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509153164Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3lqinsz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509123114Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.509172414Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3lqinsz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509079853Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-5m9wp, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7a472946-52e4-465a-b127-bc0340870774" t=2024-05-29T13:44:14.509114109Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3lqinsz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509056153Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3lqinsz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.509016613Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.508944022Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3ktumoo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.508898832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3hgcnmp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.508807341Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.508854224Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager.persist user=178698 slug=avantpage t=2024-05-29T13:44:14.508820965Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.341881ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3hgcnmp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.5087788Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.508707289Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-zrh4f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1251d71a-5ee0-41f6-975d-98d03afd4eac" t=2024-05-29T13:44:14.508735136Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.508705684Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3fxhqy5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.508600308Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x3fxhqy5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.508527758Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-md5vm, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=abc6d80e-e267-43cb-a8b8-15455b08e2be" t=2024-05-29T13:44:14.508562055Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.508487284Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x32vicvo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.508204174Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x32vicvo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.508114424Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x30ec8lk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.508037393Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-5lw9n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=84cb4c87-1f2c-4bbb-9a2b-89c6af7688b9" t=2024-05-29T13:44:14.508061503Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.508015714Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x30ec8lk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.507898931Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.507921364Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2zyj1kv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.507846441Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.507749648Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.507637448Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2zyj1kv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50779536Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2zyj1kv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50778574Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2zyj1kv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50775691Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.507783433Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2whdgkl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.507575438Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.507568054Z caller=remote_instance_store.go:51 user=349736 slug=elephanthealthcare msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2v8skx9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.507535808Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.503524292Z caller=remote_alert_sender.go:94 user=528849 slug=bitvavo host=bitvavo-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.103.222:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a5dc1727-ead5-4466-940e-ca8fcc13359d alerts=1
+ level=info ts=2024-05-29T13:44:14.50340759Z caller=remote_alert_sender.go:94 user=528849 slug=bitvavo host=bitvavo-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.180.126:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a5dc1727-ead5-4466-940e-ca8fcc13359d alerts=1
+ level=debug ts=2024-05-29T13:44:14.502509313Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2v8skx9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.507433247Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=metrics-server, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-j86lr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e9db5f65-5c35-494f-b4c8-741e6948e523" t=2024-05-29T13:44:14.507448838Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.507382057Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.507367107Z caller=remote_instance_store.go:51 user=202755 slug=iwmedia msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.507360432Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2uzg3wt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.507363596Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.507318448Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.507367624Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=metrics-server, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-74tw6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a2198c71-1426-445e-8405-34d7121697a5" t=2024-05-29T13:44:14.507304845Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=metrics-server, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-74tw6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a2198c71-1426-445e-8405-34d7121697a5" t=2024-05-29T13:44:14.507295858Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=731546 slug=liderbci instance= t=2024-05-29T13:44:14.507126722Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-operator-6749b67c79-lbggf, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=929fb03c-fa84-408a-b7ca-ce4b6a1c2195" t=2024-05-29T13:44:14.50716527Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=731546 slug=liderbci t=2024-05-29T13:44:14.507091451Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.507122788Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2ozr6p5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.507024222Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.507026869Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=b8d533814a825fda attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.506912603Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.506693836s EvaluationString:}]" duration=399.658999ms
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=eraser-controller-manager-794b999f7c-s7pbc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=8a83a13f-a279-4970-b129-289c4d22c386" t=2024-05-29T13:44:14.506994844Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2n9cxl6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.506898591Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=eqny405-pomsp03, quantile=0.75" t=2024-05-29T13:44:14.506740969Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=eqny405-pomsp01, quantile=0.75" t=2024-05-29T13:44:14.506688998Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2e2jfpm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.506675349Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2crggd1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.506566678Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.50656844Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=local-path-provisioner, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=local-path-provisioner, pod=local-path-provisioner-75d9964db8-dxgdm, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=b69e5deb-390a-416b-9840-c15814bff707" t=2024-05-29T13:44:14.506512818Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x2crggd1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.506486657Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x221bcpx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.506435756Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.50636804Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x221bcpx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.506383306Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.506196344Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.506117866Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.506103539Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.244.39:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhspyynfmrke alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1jxycsq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.506140113Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1gcwqfz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505960751Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.505889427Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.34961ms
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=kube-proxy, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=kube-proxy-rjqlc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=62d27fdc-c80b-452d-8cc6-95301a7b6d18" t=2024-05-29T13:44:14.505931003Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=kube-proxy, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=kube-proxy-rjqlc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=62d27fdc-c80b-452d-8cc6-95301a7b6d18" t=2024-05-29T13:44:14.505917027Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.505861056Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.505847838Z caller=remote_instance_store.go:51 user=265692 slug=beekeeper msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1gcth1b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50583375Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=265692 slug=beekeeper t=2024-05-29T13:44:14.505796369Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=265692 slug=beekeeper instance="appCluster=default, cluster=production-us/stateful-services-prometheus, datacenter=production-us, instance=10.246.193.39:9102, job=mongooseim-nodes, nodename=mongoose-default-us-west-2b" t=2024-05-29T13:44:14.505748307Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1fci1ws-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505704749Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.50559296Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.505525556Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.505580831Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=a0dee0cad5c169f4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.505469791Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.505287055s EvaluationString:}]" duration=201.054169ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1e4g2hr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505483857Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/salary-recommendations, Stage=--" t=2024-05-29T13:44:14.505396355Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1c9feim-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505389686Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1c9feim-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505361385Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x1c9feim-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505340785Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.505363863Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x15wol5k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505250564Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=kube-prometheus-stack, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-kube-prometheus-operator-5987fcd6bf-f5vpj, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c73646fc-1f47-494e-80cd-0a6ebae46b3f" t=2024-05-29T13:44:14.505308329Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=kube-prometheus-stack, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-kube-prometheus-operator-5987fcd6bf-f5vpj, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c73646fc-1f47-494e-80cd-0a6ebae46b3f" t=2024-05-29T13:44:14.505296121Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.505267687Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x15wol5k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505240294Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x15wol5k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505208654Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=konnectivity-agent, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-6d7cd87dfd-qnvmw, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=714ab0ab-86e2-4e9b-940b-4eaf368a2c2d" t=2024-05-29T13:44:14.505146726Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.505162397Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x158yv0n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505105983Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x158yv0n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.505020752Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x158yv0n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504992832Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x11h1d42-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504940571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x11h1d42-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50485271Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.505025639Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=konnectivity-agent, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-6d7cd87dfd-5m7cl, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=78a9043e-c3cb-4d8a-9083-75f703e8e146" t=2024-05-29T13:44:14.505018269Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=konnectivity-agent, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=konnectivity-agent-6d7cd87dfd-5m7cl, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=78a9043e-c3cb-4d8a-9083-75f703e8e146" t=2024-05-29T13:44:14.505004973Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0xs7qf3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504792419Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners, Stage=--" t=2024-05-29T13:44:14.504876063Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners, Stage=--" t=2024-05-29T13:44:14.504840727Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.504921273Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0xs7qf3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504689798Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners, Stage=--" t=2024-05-29T13:44:14.50483405Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=482906 slug=wavelo instance="__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-3" t=2024-05-29T13:44:14.504841412Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=482906 slug=wavelo instance="__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-3" t=2024-05-29T13:44:14.504831873Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.50480812Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0udtoav-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504492646Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0udtoav-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504441146Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0udtoav-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504388565Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0so97yd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504359815Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0so97yd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504339115Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0so97yd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504310865Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=482906 slug=wavelo instance="__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-2" t=2024-05-29T13:44:14.504764006Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0so97yd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504256824Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0sgqk8h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.504204163Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=keda-operator-metrics-apiserver, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=keda-operator-metrics-apiserver-58c8cbcc85-sfclb, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=b63562f3-b779-47ba-927c-d0f22b59df56" t=2024-05-29T13:44:14.504689356Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.504658101Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ level=debug ts=2024-05-29T13:44:14.504617226Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=keda-operator-metrics-apiserver, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=keda-operator-metrics-apiserver-58c8cbcc85-lqzcz, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6611d0e0-b561-4125-9687-ed850ba051a0" t=2024-05-29T13:44:14.504548461Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=482906 slug=wavelo instance="__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-2" t=2024-05-29T13:44:14.504461528Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.504413363Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.504308476Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.504270871Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=482906 slug=wavelo t=2024-05-29T13:44:14.504171097Z level=debug msg="State manager processing evaluation results" resultCount=6
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=keda-admission-webhooks, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=keda-admission-webhooks-7778cc48bd-hqhms, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=5947dd80-7c60-4733-a220-5487fd03e404" t=2024-05-29T13:44:14.504234183Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=482906 slug=wavelo version=7 fingerprint=03f5bfbe6b7cbf59 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.503982508Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-1 Value:0xc00d16f590} C:{Var:C Labels:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-1 Value:0xc00d16f540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.503268622s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-1} value=1 ], [ var='C' labels={__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-1} value=0 ]} {Instance:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-2 Value:0xc00d16f618} C:{Var:C Labels:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-2 Value:0xc00d16f658}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.503285365s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-2} value=1 ], [ var='C' labels={__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-2} value=0 ]} {Instance:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-3 Value:0xc00d16f780} C:{Var:C Labels:__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-3 Value:0xc00d16f7c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.503295715s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-3} value=1 ], [ var='C' labels={__name__=probe_success, config_version=1712935514066554112, instance=https://wut-graphql.prod-polaris.bra2.tucows.systems/v1/sys/health, job=polaris_wut_graphql_bra2_http_check, probe=prod-polaris-bra2-probe-3} value=0 ]} {Instance:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-1 Value:0xc00d16f838} C:{Var:C Labels:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-1 Value:0xc00d16f878}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.503304322s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-1} value=1 ], [ var='C' labels={__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-1} value=0 ]} {Instance:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-2 Value:0xc00d16f910} C:{Var:C Labels:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-2 Value:0xc00d16f960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.503313159s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-2} value=1 ], [ var='C' labels={__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-2} value=0 ]} {Instance:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-3 Value:0xc00d16f9e0} C:{Var:C Labels:__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-3 Value:0xc00d16fa20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.503323667s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-3} value=1 ], [ var='C' labels={__name__=probe_success, config_version=1712935541664376064, instance=https://wut-graphql.prod-polaris.cnco.tucows.systems/v1/sys/health, job=polaris_wut_graphql_cnco2_http_check, probe=prod-polaris-cnco2-probe-3} value=0 ]}]" duration=38.589357ms
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.504046188Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.504105396Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.504071562Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'"
+ logger=ngalert.state.manager user=432323 slug=lithic instance="FunctionName=card-closer-lambda-dbc7619" t=2024-05-29T13:44:14.504019628Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0q09ptb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503947621Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0q09ptb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503921521Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0mj7zjr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50386226Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=gatekeeper-controller-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=gatekeeper-system, pod=gatekeeper-controller-6494586d5d-7gjcz, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=8bb616d6-04ab-4035-a705-01496224ed8e" t=2024-05-29T13:44:14.503837809Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0mj7zjr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503757389Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0mj7zjr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503735099Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0lkktpu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503665768Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=gatekeeper-audit-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=gatekeeper-system, pod=gatekeeper-audit-57fc5568f8-57dm4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7e8837a9-a135-46bc-b7b2-e768aaf3e4dd" t=2024-05-29T13:44:14.503679747Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.503219364Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + 
level=debug ts=2024-05-29T13:44:14.50322134Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0lkktpu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503574797Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=fluent-bit, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=fluent-bit, pod=fluent-bit-stl62, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=35624b38-34ef-4454-bd57-19fe9dd6bbfc" t=2024-05-29T13:44:14.503549043Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.503523953Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.503461977Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.503433493Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.503478056Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0khm734-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503454396Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.50342541Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=707603 slug=canoneurope t=2024-05-29T13:44:14.503411618Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-x0khm734-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503400785Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:14.503407678Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=fluent-bit, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=fluent-bit, pod=fluent-bit-lrwvk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=fba32b2e-4e41-4a80-8bdb-5def6f35bcdb" t=2024-05-29T13:44:14.503425767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=707603 slug=canoneurope version=1 fingerprint=7d1edffa5dcbabd5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.503349633Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.503061673s EvaluationString:}]" duration=16.016938ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0gkqzkz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503370075Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.503098202Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0gkqzkz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503328974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.503380956Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + level=debug ts=2024-05-29T13:44:14.503257628Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0gkqzkz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503267404Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x0gkqzkz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503257614Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.503226864Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.503243357Z caller=remote_alert_sender.go:94 user=656459 slug=activeport host=activeport-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.20.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a45ede3f-98de-4d0e-b588-d56a04c59787 alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x06vph50-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503153093Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.503102382Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=51.891557ms + level=debug ts=2024-05-29T13:44:14.503126373Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x06vph50-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503085672Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.503113456Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x005pnd2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.503012941Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.503090615Z level=debug msg="Saving alert states" count=1 
max_state_save_concurrency=1 + logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:14.503063032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x005pnd2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502957931Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-x005pnd2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50290808Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzsfs1zg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502729318Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=0dce1d201767e97e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.502825913Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.502564247s EvaluationString:}]" duration=56.289327ms + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.502705648Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.531146ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzpvli4o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502503166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzpvli4o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502452695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-wznf0yg8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502384415Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=310637 slug=notino instance="app_kubernetes_io_name=databreakersapi" t=2024-05-29T13:44:14.502788813Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.502732744Z caller=remote_alert_sender.go:94 user=174016 slug=journalstaging host=journalstaging-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.66.172:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fIEnsqd7k alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wznf0yg8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502289634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wznf0yg8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502276234Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzipz1xl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.502230113Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.502628812Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + level=debug ts=2024-05-29T13:44:14.502567586Z caller=remote_instance_store.go:51 user=456850 slug=juniz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.502607186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=3a41b2a531603e81 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.502489084Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.502243378s EvaluationString:}]" duration=298.581064ms + level=debug ts=2024-05-29T13:44:14.502434173Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:14.502327931Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.966343ms + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.502391175Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + level=debug ts=2024-05-29T13:44:14.502193807Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.50221458Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.502146569Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.502151708Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.501954171Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.50209151Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.502067917Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.501977603Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.501992695Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + level=debug ts=2024-05-29T13:44:14.501924831Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzb0ig85-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.501808439Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzb0ig85-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.501762338Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=console, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-console-7477fb68c6-wh6p5, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=de473449-41b4-463c-94a7-0884bf61ce34" t=2024-05-29T13:44:14.501862831Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzazrk9e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.501694028Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzazrk9e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.501663357Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wzazrk9e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.501544116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=306551 slug=teckresourcesalerts t=2024-05-29T13:44:14.501624851Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.192521ms + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.501620154Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + level=debug ts=2024-05-29T13:44:14.501395784Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.50146748Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-wyz50u0m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.501213143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf" t=2024-05-29T13:44:14.501374371Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.501280095Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyw2y1l4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50095928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyw2y1l4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.50093846Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.501296574Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyvwzanf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.500854259Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.501166032Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3" t=2024-05-29T13:44:14.501176964Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.501102288Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" 
+ level=debug ts=2024-05-29T13:44:14.501093577Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.50099754Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=config-reloader, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3" t=2024-05-29T13:44:14.501029401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:14.500970079Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=491157 slug=prd01wr instance="DatabaseClass=db.r5.8xlarge" t=2024-05-29T13:44:14.500944974Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=2c414b883d098b85 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.500775193Z level=debug msg="Alert rule evaluated" results="[{Instance:DatabaseClass=db.r5.8xlarge State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DatabaseClass=db.r5.8xlarge Value:0xc01d155ff0} C:{Var:C Labels:DatabaseClass=db.r5.8xlarge Value:0xc01d155ff8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.50029144s EvaluationString:[ var='B' labels={DatabaseClass=db.r5.8xlarge} value=0.5733273705691291 ], [ var='C' labels={DatabaseClass=db.r5.8xlarge} value=0 ]}]" duration=104.373155ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-pgjjk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=965fe23d-a7fe-4bbe-87e4-7d6582f91560" t=2024-05-29T13:44:14.500873319Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyjn4x63-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.500703907Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-knjm4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=d2ddda4b-6924-4658-8e46-711b086baa73" t=2024-05-29T13:44:14.500679287Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyf6wczk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.500598076Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyf6wczk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.500568256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyaujuq0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.500392724Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-c64l4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7c955a73-a481-44de-8041-7fc9df160d3e" t=2024-05-29T13:44:14.500311778Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.500292943Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wyaujuq0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.500276503Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=456946 slug=menlosecurityredge instance="datasource_uid=grafanacloud-prom, ref_id=DownstreamConnectivity" t=2024-05-29T13:44:14.500194923Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=456946 slug=menlosecurityredge instance="datasource_uid=grafanacloud-prom, ref_id=DownstreamConnectivity" t=2024-05-29T13:44:14.500178288Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=456946 slug=menlosecurityredge instance="datasource_uid=grafanacloud-prom, ref_id=DownstreamConnectivity" t=2024-05-29T13:44:14.500151281Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wy82kb0a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.500103521Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=456946 slug=menlosecurityredge t=2024-05-29T13:44:14.500082985Z level=debug msg="State manager processing 
evaluation results" resultCount=1 + logger=ngalert.scheduler user=456946 slug=menlosecurityredge version=5 fingerprint=e06392b5437e8698 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.499984331Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=DownstreamConnectivity State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.499642249s EvaluationString:}]" duration=18.003479ms + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.499971183Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.499988302Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wy4m6jck-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.499928859Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:14.499956438Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.499895092Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.499901984Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wy31f4no-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.499833478Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wy31f4no-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.499804248Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-tcgpr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1c96ea92-c31e-4d04-9577-bd50df81aca3" t=2024-05-29T13:44:14.499862773Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-rbgkx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a77457b9-3f87-4645-a5d9-782245b0e52a" t=2024-05-29T13:44:14.499716594Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wy31f4no-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.499659566Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-rbgkx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a77457b9-3f87-4645-a5d9-782245b0e52a" t=2024-05-29T13:44:14.499703138Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wy1zb9w7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.499588226Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-p2xzh, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=4c4984fb-62cb-4650-8ce1-4427811e8e66" t=2024-05-29T13:44:14.499542201Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wy1zb9w7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.499478815Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.499487519Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + level=debug ts=2024-05-29T13:44:14.499414423Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-ndshv, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bcc33fc6-215a-492f-9eed-58a35dae260e" t=2024-05-29T13:44:14.499388935Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.499346354Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + level=debug ts=2024-05-29T13:44:14.499187849Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxys0ix3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.499146821Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=cert-manager-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-webhook-86d79b7d64-hnmhr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c34ce708-88b6-4f66-9c06-31c911c41ae6" t=2024-05-29T13:44:14.499231066Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxys0ix3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.4990675Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxys0ix3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49899026Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:14.499145795Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=190917 slug=d1cx t=2024-05-29T13:44:14.499079071Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-wxx1971x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498943879Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxx1971x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498860318Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxx1971x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498789827Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxx1971x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498740987Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxqd5yb7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498692176Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.498671656Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.498892681Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxqd5yb7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498629926Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.498700841Z level=error 
msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxmi30kj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498523505Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=azure-policy-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-webhook-564c9d7c7b-qvxch, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aee2420d-cf19-4555-8f65-f8e4f5f07040" t=2024-05-29T13:44:14.498644449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:14.498442892Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.498501762Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=fe98eaba-ee1b-4198-8ef3-9181223fbc0d, ref_id=A" t=2024-05-29T13:44:14.498405291Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.498429109Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.498333726Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679831 slug=joveostageaws t=2024-05-29T13:44:14.49835839Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.49835038Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxjv4ule-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498270602Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxjv4ule-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498234742Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxjv4ule-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498210331Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.498201478Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxjv4ule-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498160441Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxjv4ule-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.498148351Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxbeuqj1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49805105Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxbeuqj1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49802688Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.498052478Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxbeuqj1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497959779Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxafbgym-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497885378Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-fflbq, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bd560720-ea43-4d92-919c-6969b7ea69e5" t=2024-05-29T13:44:14.497873487Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wxafbgym-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497788237Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.497794811Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-www5fz1f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497728997Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-d9cmk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=5136a23f-db62-4d76-8312-5858b7ecf39a" t=2024-05-29T13:44:14.497731688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.497684708Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := 
.Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-www5fz1f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497622095Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwuy90bf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497544605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=autoscaler, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=coredns-autoscaler-8b5b467ff-kd279, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=08447e82-5fab-433f-b3ad-46203d94faa8" t=2024-05-29T13:44:14.497543532Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=autoscaler, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=coredns-autoscaler-8b5b467ff-kd279, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=08447e82-5fab-433f-b3ad-46203d94faa8" t=2024-05-29T13:44:14.497528636Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:14.497419908Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}Pod crash looping {{ $labels.instance }}), cluster {{ $labels.control-cluster }} {{ $labels.redpanda_id }}': error parsing template __alert_DP - Kubernetes Pod Crashloop: template: __alert_DP - Kubernetes Pod Crashloop:1: bad character U+002D '-'" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwuy90bf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497491214Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwuy90bf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497452284Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.497313512Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.497421251Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.497386686Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwtmyvv0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497349443Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwsqg2f5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.497233001Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=183214 slug=vectorizedio version=245 fingerprint=53bb6019dc207b95 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.495596681Z level=debug msg="Alert rule evaluated" results="[{Instance:container=autoscaler, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=coredns-autoscaler-8b5b467ff-kd279, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=08447e82-5fab-433f-b3ad-46203d94faa8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=autoscaler, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=coredns-autoscaler-8b5b467ff-kd279, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=08447e82-5fab-433f-b3ad-46203d94faa8 Value:0xc06f4954d8} C:{Var:C Labels:container=autoscaler, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=coredns-autoscaler-8b5b467ff-kd279, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=08447e82-5fab-433f-b3ad-46203d94faa8 Value:0xc06f4955b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490269891s EvaluationString:[ var='B' labels={container=autoscaler, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=coredns-autoscaler-8b5b467ff-kd279, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=08447e82-5fab-433f-b3ad-46203d94faa8} value=0 ], [ var='C' labels={container=autoscaler, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=coredns-autoscaler-8b5b467ff-kd279, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=08447e82-5fab-433f-b3ad-46203d94faa8} value=0 ]} {Instance:container=azure-npm, endpoint=http, 
instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-d9cmk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=5136a23f-db62-4d76-8312-5858b7ecf39a State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-d9cmk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=5136a23f-db62-4d76-8312-5858b7ecf39a Value:0xc06f495708} C:{Var:C Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-d9cmk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=5136a23f-db62-4d76-8312-5858b7ecf39a Value:0xc06f495888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490284683s EvaluationString:[ var='B' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-d9cmk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=5136a23f-db62-4d76-8312-5858b7ecf39a} value=0 ], [ var='C' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-d9cmk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=5136a23f-db62-4d76-8312-5858b7ecf39a} value=0 ]} {Instance:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-fflbq, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bd560720-ea43-4d92-919c-6969b7ea69e5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-fflbq, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bd560720-ea43-4d92-919c-6969b7ea69e5 Value:0xc06f495ac0} C:{Var:C Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-fflbq, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bd560720-ea43-4d92-919c-6969b7ea69e5 Value:0xc06f495c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490290614s EvaluationString:[ var='B' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-fflbq, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bd560720-ea43-4d92-919c-6969b7ea69e5} value=0 ], [ var='C' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-fflbq, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bd560720-ea43-4d92-919c-6969b7ea69e5} value=0 ]} {Instance:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-kxj44, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c18f2f0d-52f2-483d-b71c-2fb23627c46a State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, 
pod=azure-npm-kxj44, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c18f2f0d-52f2-483d-b71c-2fb23627c46a Value:0xc06f495fc0} C:{Var:C Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-kxj44, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c18f2f0d-52f2-483d-b71c-2fb23627c46a Value:0xc030fb2088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490301514s EvaluationString:[ var='B' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-kxj44, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c18f2f0d-52f2-483d-b71c-2fb23627c46a} value=0 ], [ var='C' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-kxj44, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c18f2f0d-52f2-483d-b71c-2fb23627c46a} value=0 ]} {Instance:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-ql8fb, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=66846bf7-889f-44a3-a0c2-21b18d93a269 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-ql8fb, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=66846bf7-889f-44a3-a0c2-21b18d93a269 Value:0xc030fb2220} C:{Var:C Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-ql8fb, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=66846bf7-889f-44a3-a0c2-21b18d93a269 Value:0xc030fb22c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490309704s EvaluationString:[ var='B' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-ql8fb, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=66846bf7-889f-44a3-a0c2-21b18d93a269} value=0 ], [ var='C' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-ql8fb, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=66846bf7-889f-44a3-a0c2-21b18d93a269} value=0 ]} {Instance:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-wvdn5, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6fb5ee22-5ca9-441c-98f7-90d2ff43d7ac State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-wvdn5, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6fb5ee22-5ca9-441c-98f7-90d2ff43d7ac Value:0xc030fb2470} C:{Var:C Labels:container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-wvdn5, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, 
uid=6fb5ee22-5ca9-441c-98f7-90d2ff43d7ac Value:0xc030fb2530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490316268s EvaluationString:[ var='B' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-wvdn5, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6fb5ee22-5ca9-441c-98f7-90d2ff43d7ac} value=0 ], [ var='C' labels={container=azure-npm, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-npm-wvdn5, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6fb5ee22-5ca9-441c-98f7-90d2ff43d7ac} value=0 ]} {Instance:container=azure-policy, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-9f4f796d9-krdrr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=548eab5a-5069-4649-b4a6-52232e7da8b5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=azure-policy, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-9f4f796d9-krdrr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=548eab5a-5069-4649-b4a6-52232e7da8b5 Value:0xc030fb2690} C:{Var:C Labels:container=azure-policy, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-9f4f796d9-krdrr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=548eab5a-5069-4649-b4a6-52232e7da8b5 Value:0xc030fb2738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490324989s EvaluationString:[ var='B' labels={container=azure-policy, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-9f4f796d9-krdrr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=548eab5a-5069-4649-b4a6-52232e7da8b5} value=0 ], [ var='C' labels={container=azure-policy, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-9f4f796d9-krdrr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=548eab5a-5069-4649-b4a6-52232e7da8b5} value=0 ]} {Instance:container=azure-policy-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-webhook-564c9d7c7b-qvxch, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aee2420d-cf19-4555-8f65-f8e4f5f07040 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=azure-policy-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-webhook-564c9d7c7b-qvxch, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aee2420d-cf19-4555-8f65-f8e4f5f07040 Value:0xc030fb2860} C:{Var:C Labels:container=azure-policy-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-webhook-564c9d7c7b-qvxch, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aee2420d-cf19-4555-8f65-f8e4f5f07040 Value:0xc030fb2910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490334803s EvaluationString:[ var='B' labels={container=azure-policy-webhook, endpoint=http, 
instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-webhook-564c9d7c7b-qvxch, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aee2420d-cf19-4555-8f65-f8e4f5f07040} value=0 ], [ var='C' labels={container=azure-policy-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-policy-webhook-564c9d7c7b-qvxch, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aee2420d-cf19-4555-8f65-f8e4f5f07040} value=0 ]} {Instance:container=cert-manager-cainjector, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-cainjector-6dbcc4bd6b-k98fr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=fce25ecd-1fa0-4cfd-b674-dd2c0cc56f18 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cert-manager-cainjector, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-cainjector-6dbcc4bd6b-k98fr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=fce25ecd-1fa0-4cfd-b674-dd2c0cc56f18 Value:0xc030fb2a58} C:{Var:C Labels:container=cert-manager-cainjector, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-cainjector-6dbcc4bd6b-k98fr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=fce25ecd-1fa0-4cfd-b674-dd2c0cc56f18 Value:0xc030fb2ae8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490343308s EvaluationString:[ var='B' labels={container=cert-manager-cainjector, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-cainjector-6dbcc4bd6b-k98fr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=fce25ecd-1fa0-4cfd-b674-dd2c0cc56f18} value=0 ], [ var='C' labels={container=cert-manager-cainjector, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-cainjector-6dbcc4bd6b-k98fr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=fce25ecd-1fa0-4cfd-b674-dd2c0cc56f18} value=0 ]} {Instance:container=cert-manager-controller, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-657b59cd95-6f67n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=adc8657f-bf2f-4627-81f0-d116cdfcbf6b State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cert-manager-controller, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-657b59cd95-6f67n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=adc8657f-bf2f-4627-81f0-d116cdfcbf6b Value:0xc030fb2ce8} C:{Var:C Labels:container=cert-manager-controller, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-657b59cd95-6f67n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=adc8657f-bf2f-4627-81f0-d116cdfcbf6b Value:0xc030fb2c40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490352243s EvaluationString:[ var='B' labels={container=cert-manager-controller, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, 
namespace=cert-manager, pod=cert-manager-657b59cd95-6f67n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=adc8657f-bf2f-4627-81f0-d116cdfcbf6b} value=0 ], [ var='C' labels={container=cert-manager-controller, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-657b59cd95-6f67n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=adc8657f-bf2f-4627-81f0-d116cdfcbf6b} value=0 ]} {Instance:container=cert-manager-unlocker, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-unlocker-job-zj7ps, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=afe5e80a-d0b1-4b5a-935f-a93b60a8b447 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cert-manager-unlocker, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-unlocker-job-zj7ps, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=afe5e80a-d0b1-4b5a-935f-a93b60a8b447 Value:0xc030fb2e60} C:{Var:C Labels:container=cert-manager-unlocker, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-unlocker-job-zj7ps, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=afe5e80a-d0b1-4b5a-935f-a93b60a8b447 Value:0xc030fb2f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490361835s EvaluationString:[ var='B' labels={container=cert-manager-unlocker, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-unlocker-job-zj7ps, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=afe5e80a-d0b1-4b5a-935f-a93b60a8b447} value=0 ], [ var='C' labels={container=cert-manager-unlocker, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-unlocker-job-zj7ps, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=afe5e80a-d0b1-4b5a-935f-a93b60a8b447} value=0 ]} {Instance:container=cert-manager-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-webhook-86d79b7d64-hnmhr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c34ce708-88b6-4f66-9c06-31c911c41ae6 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cert-manager-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-webhook-86d79b7d64-hnmhr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c34ce708-88b6-4f66-9c06-31c911c41ae6 Value:0xc030fb3058} C:{Var:C Labels:container=cert-manager-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-webhook-86d79b7d64-hnmhr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c34ce708-88b6-4f66-9c06-31c911c41ae6 Value:0xc030fb30e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490369701s EvaluationString:[ var='B' labels={container=cert-manager-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-webhook-86d79b7d64-hnmhr, provider=azure, 
redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c34ce708-88b6-4f66-9c06-31c911c41ae6} value=0 ], [ var='C' labels={container=cert-manager-webhook, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-webhook-86d79b7d64-hnmhr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=c34ce708-88b6-4f66-9c06-31c911c41ae6} value=0 ]} {Instance:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-ndshv, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bcc33fc6-215a-492f-9eed-58a35dae260e State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-ndshv, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bcc33fc6-215a-492f-9eed-58a35dae260e Value:0xc030fb32e0} C:{Var:C Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-ndshv, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bcc33fc6-215a-492f-9eed-58a35dae260e Value:0xc030fb3238}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490376278s EvaluationString:[ var='B' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-ndshv, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bcc33fc6-215a-492f-9eed-58a35dae260e} value=0 ], [ var='C' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-ndshv, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bcc33fc6-215a-492f-9eed-58a35dae260e} value=0 ]} {Instance:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-p2xzh, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=4c4984fb-62cb-4650-8ce1-4427811e8e66 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-p2xzh, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=4c4984fb-62cb-4650-8ce1-4427811e8e66 Value:0xc030fb3420} C:{Var:C Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-p2xzh, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=4c4984fb-62cb-4650-8ce1-4427811e8e66 Value:0xc030fb34c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490390722s EvaluationString:[ var='B' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-p2xzh, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=4c4984fb-62cb-4650-8ce1-4427811e8e66} value=0 ], [ var='C' labels={container=cloud-node-manager, endpoint=http, 
instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-p2xzh, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=4c4984fb-62cb-4650-8ce1-4427811e8e66} value=0 ]} {Instance:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-rbgkx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a77457b9-3f87-4645-a5d9-782245b0e52a State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-rbgkx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a77457b9-3f87-4645-a5d9-782245b0e52a Value:0xc030fb3678} C:{Var:C Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-rbgkx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a77457b9-3f87-4645-a5d9-782245b0e52a Value:0xc030fb35f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.49039593s EvaluationString:[ var='B' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-rbgkx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a77457b9-3f87-4645-a5d9-782245b0e52a} value=0 ], [ var='C' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-rbgkx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a77457b9-3f87-4645-a5d9-782245b0e52a} value=0 ]} {Instance:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-tcgpr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1c96ea92-c31e-4d04-9577-bd50df81aca3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-tcgpr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1c96ea92-c31e-4d04-9577-bd50df81aca3 Value:0xc030fb3850} C:{Var:C Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-tcgpr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1c96ea92-c31e-4d04-9577-bd50df81aca3 Value:0xc030fb37c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490404512s EvaluationString:[ var='B' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-tcgpr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1c96ea92-c31e-4d04-9577-bd50df81aca3} value=0 ], [ var='C' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-tcgpr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, 
uid=1c96ea92-c31e-4d04-9577-bd50df81aca3} value=0 ]} {Instance:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-txnsc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=9d70bb0f-cad1-40e4-813a-af5c07ef98f2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-txnsc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=9d70bb0f-cad1-40e4-813a-af5c07ef98f2 Value:0xc030fb3980} C:{Var:C Labels:container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-txnsc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=9d70bb0f-cad1-40e4-813a-af5c07ef98f2 Value:0xc030fb3a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490412046s EvaluationString:[ var='B' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-txnsc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=9d70bb0f-cad1-40e4-813a-af5c07ef98f2} value=0 ], [ var='C' labels={container=cloud-node-manager, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=cloud-node-manager-txnsc, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=9d70bb0f-cad1-40e4-813a-af5c07ef98f2} value=0 ]} {Instance:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-58p6q, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=ec6f4f56-9cce-462b-970b-ce3d05303b85 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-58p6q, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=ec6f4f56-9cce-462b-970b-ce3d05303b85 Value:0xc030fb3bd8} C:{Var:C Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-58p6q, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=ec6f4f56-9cce-462b-970b-ce3d05303b85 Value:0xc030fb3c68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490418123s EvaluationString:[ var='B' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-58p6q, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=ec6f4f56-9cce-462b-970b-ce3d05303b85} value=0 ], [ var='C' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-58p6q, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=ec6f4f56-9cce-462b-970b-ce3d05303b85} value=0 ]} {Instance:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-c64l4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, 
uid=7c955a73-a481-44de-8041-7fc9df160d3e State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-c64l4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7c955a73-a481-44de-8041-7fc9df160d3e Value:0xc030fb3f00} C:{Var:C Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-c64l4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7c955a73-a481-44de-8041-7fc9df160d3e Value:0xc030fb3e30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490424524s EvaluationString:[ var='B' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-c64l4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7c955a73-a481-44de-8041-7fc9df160d3e} value=0 ], [ var='C' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-c64l4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7c955a73-a481-44de-8041-7fc9df160d3e} value=0 ]} {Instance:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-fwrbx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6c07d1e7-4fe9-4f59-bdfa-eb74338c6a35 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-fwrbx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6c07d1e7-4fe9-4f59-bdfa-eb74338c6a35 Value:0xc0059320d8} C:{Var:C Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-fwrbx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6c07d1e7-4fe9-4f59-bdfa-eb74338c6a35 Value:0xc0059321c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490434789s EvaluationString:[ var='B' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-fwrbx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6c07d1e7-4fe9-4f59-bdfa-eb74338c6a35} value=0 ], [ var='C' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-fwrbx, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6c07d1e7-4fe9-4f59-bdfa-eb74338c6a35} value=0 ]} {Instance:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-knjm4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=d2ddda4b-6924-4658-8e46-711b086baa73 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-knjm4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=d2ddda4b-6924-4658-8e46-711b086baa73 
Value:0xc005932350} C:{Var:C Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-knjm4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=d2ddda4b-6924-4658-8e46-711b086baa73 Value:0xc005932428}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490445992s EvaluationString:[ var='B' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-knjm4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=d2ddda4b-6924-4658-8e46-711b086baa73} value=0 ], [ var='C' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-knjm4, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=d2ddda4b-6924-4658-8e46-711b086baa73} value=0 ]} {Instance:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-pgjjk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=965fe23d-a7fe-4bbe-87e4-7d6582f91560 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-pgjjk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=965fe23d-a7fe-4bbe-87e4-7d6582f91560 Value:0xc0059327a0} C:{Var:C Labels:container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-pgjjk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=965fe23d-a7fe-4bbe-87e4-7d6582f91560 Value:0xc005932938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490454718s EvaluationString:[ var='B' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-pgjjk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=965fe23d-a7fe-4bbe-87e4-7d6582f91560} value=0 ], [ var='C' labels={container=cns-container, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=azure-cns-pgjjk, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=965fe23d-a7fe-4bbe-87e4-7d6582f91560} value=0 ]} {Instance:container=config-reloader, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=config-reloader, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3 Value:0xc005932d08} C:{Var:C Labels:container=config-reloader, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, 
service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3 Value:0xc005932e30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490463926s EvaluationString:[ var='B' labels={container=config-reloader, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3} value=0 ], [ var='C' labels={container=config-reloader, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3} value=0 ]} {Instance:container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3 Value:0xc005933118} C:{Var:C Labels:container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3 Value:0xc005933028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490475127s EvaluationString:[ var='B' labels={container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3} value=0 ], [ var='C' labels={container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3} value=0 ]} {Instance:container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf Value:0xc005933500} C:{Var:C Labels:container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf Value:0xc005933468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490483813s EvaluationString:[ var='B' labels={container=config-watcher, endpoint=http, instance=10.0.4.24:8080, 
job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf} value=0 ], [ var='C' labels={container=config-watcher, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf} value=0 ]} [… roughly 40 further near-identical {Instance: …} evaluation entries elided, one per container/pod pair (config-watcher, console, controller, coredns, external-dns, external-dns-helper, fluent-bit, gatekeeper-audit-container, gatekeeper-controller-container, keda-admission-webhooks, keda-operator, keda-operator-metrics-apiserver, kminion, konnectivity-agent, kube-prometheus-stack, kube-proxy, kube-rbac-proxy, kube-state-metrics, local-path-provisioner, manager, metrics-server); every elided entry reports State:Normal, empty Error and Results, B and C values of 0, EvaluatedAt:2024-05-29 13:44:10 +0000 UTC, and an EvaluationDuration of about 4.49s …] {Instance:container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-74tw6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a2198c71-1426-445e-8405-34d7121697a5 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-74tw6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a2198c71-1426-445e-8405-34d7121697a5 Value:0xc0916125f0} C:{Var:C Labels:container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-74tw6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a2198c71-1426-445e-8405-34d7121697a5 Value:0xc091612700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490736659s EvaluationString:[ var='B' labels={container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system,
pod=metrics-server-6bc6599b58-74tw6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a2198c71-1426-445e-8405-34d7121697a5} value=0 ], [ var='C' labels={container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-74tw6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=a2198c71-1426-445e-8405-34d7121697a5} value=0 ]} {Instance:container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-j86lr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e9db5f65-5c35-494f-b4c8-741e6948e523 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-j86lr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e9db5f65-5c35-494f-b4c8-741e6948e523 Value:0xc0916129e0} C:{Var:C Labels:container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-j86lr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e9db5f65-5c35-494f-b4c8-741e6948e523 Value:0xc091612b48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490740817s EvaluationString:[ var='B' labels={container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-j86lr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e9db5f65-5c35-494f-b4c8-741e6948e523} value=0 ], [ var='C' labels={container=metrics-server-vpa, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=kube-system, pod=metrics-server-6bc6599b58-j86lr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e9db5f65-5c35-494f-b4c8-741e6948e523} value=0 ]} {Instance:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-5lw9n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=84cb4c87-1f2c-4bbb-9a2b-89c6af7688b9 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-5lw9n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=84cb4c87-1f2c-4bbb-9a2b-89c6af7688b9 Value:0xc091612cc8} C:{Var:C Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-5lw9n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=84cb4c87-1f2c-4bbb-9a2b-89c6af7688b9 Value:0xc091612e30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490744833s EvaluationString:[ var='B' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-5lw9n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, 
service=prometheus-kube-state-metrics, uid=84cb4c87-1f2c-4bbb-9a2b-89c6af7688b9} value=0 ], [ var='C' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-5lw9n, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=84cb4c87-1f2c-4bbb-9a2b-89c6af7688b9} value=0 ]} {Instance:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-8429f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e23bc71d-eacc-456f-860e-783efdbb29b0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-8429f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e23bc71d-eacc-456f-860e-783efdbb29b0 Value:0xc091612fd8} C:{Var:C Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-8429f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e23bc71d-eacc-456f-860e-783efdbb29b0 Value:0xc0916130b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490749736s EvaluationString:[ var='B' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-8429f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e23bc71d-eacc-456f-860e-783efdbb29b0} value=0 ], [ var='C' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-8429f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=e23bc71d-eacc-456f-860e-783efdbb29b0} value=0 ]} {Instance:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-9jp79, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bfc65a6e-a231-4b27-9a43-1d2bb98f0402 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-9jp79, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bfc65a6e-a231-4b27-9a43-1d2bb98f0402 Value:0xc0916132c0} C:{Var:C Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-9jp79, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bfc65a6e-a231-4b27-9a43-1d2bb98f0402 Value:0xc091613420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490754472s EvaluationString:[ var='B' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-9jp79, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, 
service=prometheus-kube-state-metrics, uid=bfc65a6e-a231-4b27-9a43-1d2bb98f0402} value=0 ], [ var='C' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-9jp79, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=bfc65a6e-a231-4b27-9a43-1d2bb98f0402} value=0 ]} {Instance:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-md5vm, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=abc6d80e-e267-43cb-a8b8-15455b08e2be State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-md5vm, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=abc6d80e-e267-43cb-a8b8-15455b08e2be Value:0xc0916137a8} C:{Var:C Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-md5vm, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=abc6d80e-e267-43cb-a8b8-15455b08e2be Value:0xc0916136f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490759481s EvaluationString:[ var='B' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-md5vm, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=abc6d80e-e267-43cb-a8b8-15455b08e2be} value=0 ], [ var='C' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-md5vm, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=abc6d80e-e267-43cb-a8b8-15455b08e2be} value=0 ]} {Instance:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-zrh4f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1251d71a-5ee0-41f6-975d-98d03afd4eac State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-zrh4f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1251d71a-5ee0-41f6-975d-98d03afd4eac Value:0xc091613990} C:{Var:C Labels:container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-zrh4f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1251d71a-5ee0-41f6-975d-98d03afd4eac Value:0xc091613a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490763872s EvaluationString:[ var='B' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-zrh4f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, 
service=prometheus-kube-state-metrics, uid=1251d71a-5ee0-41f6-975d-98d03afd4eac} value=0 ], [ var='C' labels={container=node-exporter, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-node-exporter-zrh4f, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=1251d71a-5ee0-41f6-975d-98d03afd4eac} value=0 ]} {Instance:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-2g9nd, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=2e51a0c8-ba70-42dd-b16c-08890241cb33 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-2g9nd, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=2e51a0c8-ba70-42dd-b16c-08890241cb33 Value:0xc091613b60} C:{Var:C Labels:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-2g9nd, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=2e51a0c8-ba70-42dd-b16c-08890241cb33 Value:0xc091613c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490768689s EvaluationString:[ var='B' labels={container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-2g9nd, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=2e51a0c8-ba70-42dd-b16c-08890241cb33} value=0 ], [ var='C' labels={container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-2g9nd, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=2e51a0c8-ba70-42dd-b16c-08890241cb33} value=0 ]} {Instance:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-5m9wp, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7a472946-52e4-465a-b127-bc0340870774 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-5m9wp, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7a472946-52e4-465a-b127-bc0340870774 Value:0xc091613ef8} C:{Var:C Labels:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-5m9wp, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7a472946-52e4-465a-b127-bc0340870774 Value:0xc091613f88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490773566s EvaluationString:[ var='B' labels={container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-5m9wp, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7a472946-52e4-465a-b127-bc0340870774} value=0 ], [ var='C' labels={container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, 
pod=redpanda-node-setup-5m9wp, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=7a472946-52e4-465a-b127-bc0340870774} value=0 ]} {Instance:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-jswf6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6b880760-7c39-45f7-a97f-5fff96c648b4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-jswf6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6b880760-7c39-45f7-a97f-5fff96c648b4 Value:0xc006202110} C:{Var:C Labels:container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-jswf6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6b880760-7c39-45f7-a97f-5fff96c648b4 Value:0xc006202210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490778866s EvaluationString:[ var='B' labels={container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-jswf6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6b880760-7c39-45f7-a97f-5fff96c648b4} value=0 ], [ var='C' labels={container=pause, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-node-setup, pod=redpanda-node-setup-jswf6, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=6b880760-7c39-45f7-a97f-5fff96c648b4} value=0 ]} {Instance:container=prometheus, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=prometheus, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3 Value:0xc006202478} C:{Var:C Labels:container=prometheus, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3 Value:0xc006202358}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490783376s EvaluationString:[ var='B' labels={container=prometheus, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3} value=0 ], [ var='C' labels={container=prometheus, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda-monitoring, pod=prometheus-prometheus-kube-prometheus-prometheus-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, 
service=prometheus-kube-state-metrics, uid=850769f7-7609-4d2d-a0ed-325d5cc8eba3} value=0 ]} {Instance:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3 Value:0xc0062027c0} C:{Var:C Labels:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3 Value:0xc006202680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490788598s EvaluationString:[ var='B' labels={container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3} value=0 ], [ var='C' labels={container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-0, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=86e24cc1-e049-4cb8-9244-88cdb77ec7a3} value=0 ]} {Instance:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf Value:0xc006202968} C:{Var:C Labels:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf Value:0xc006202ac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490793024s EvaluationString:[ var='B' labels={container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf} value=0 ], [ var='C' labels={container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-1, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=25cacb21-e76b-4893-9bcf-e272513a58cf} value=0 ]} {Instance:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-2, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=06bfff69-32a5-4415-a2ef-2dd4c70a49e4 State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-2, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=06bfff69-32a5-4415-a2ef-2dd4c70a49e4 Value:0xc006202d00} C:{Var:C Labels:container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-2, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=06bfff69-32a5-4415-a2ef-2dd4c70a49e4 Value:0xc006202e00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490797188s EvaluationString:[ var='B' labels={container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-2, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=06bfff69-32a5-4415-a2ef-2dd4c70a49e4} value=0 ], [ var='C' labels={container=redpanda, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-2, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=06bfff69-32a5-4415-a2ef-2dd4c70a49e4} value=0 ]} {Instance:container=redpanda-post-install, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-configuration-d9q49, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=482658c1-beec-4cad-9f34-087d828d1b95 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=redpanda-post-install, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-configuration-d9q49, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=482658c1-beec-4cad-9f34-087d828d1b95 Value:0xc006203450} C:{Var:C Labels:container=redpanda-post-install, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-configuration-d9q49, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=482658c1-beec-4cad-9f34-087d828d1b95 Value:0xc006203528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490801554s EvaluationString:[ var='B' labels={container=redpanda-post-install, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-configuration-d9q49, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=482658c1-beec-4cad-9f34-087d828d1b95} value=0 ], [ var='C' labels={container=redpanda-post-install, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-configuration-d9q49, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=482658c1-beec-4cad-9f34-087d828d1b95} value=0 ]} {Instance:container=redpanda-post-upgrade, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-post-upgrade-pnp5c, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aaa37243-ab5c-4c9b-bd25-6bf61707fd1a State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=redpanda-post-upgrade, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-post-upgrade-pnp5c, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, 
service=prometheus-kube-state-metrics, uid=aaa37243-ab5c-4c9b-bd25-6bf61707fd1a Value:0xc006203728} C:{Var:C Labels:container=redpanda-post-upgrade, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-post-upgrade-pnp5c, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aaa37243-ab5c-4c9b-bd25-6bf61707fd1a Value:0xc006203878}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490806519s EvaluationString:[ var='B' labels={container=redpanda-post-upgrade, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-post-upgrade-pnp5c, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aaa37243-ab5c-4c9b-bd25-6bf61707fd1a} value=0 ], [ var='C' labels={container=redpanda-post-upgrade, endpoint=http, instance=10.0.4.24:8080, job=kube-state-metrics, namespace=redpanda, pod=redpanda-broker-post-upgrade-pnp5c, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=prometheus-kube-state-metrics, uid=aaa37243-ab5c-4c9b-bd25-6bf61707fd1a} value=0 ]}]" duration=297.006846ms + level=debug ts=2024-05-29T13:44:14.493510678Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.497263736Z caller=remote_instance_store.go:51 user=374423 slug=bitburst msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.497206095Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.497160997Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=39.171446ms + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.497179318Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.497085936Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.49709384Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.497016893Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.497030885Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.497012456Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=55.587078ms + level=debug ts=2024-05-29T13:44:14.496856794Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwrec0ob-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496887318Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwocy8uu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496857478Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwocy8uu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496800347Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.496713318Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwneg506-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496594975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwdimsrx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496435273Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwdimsrx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496423603Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.496150429Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.495937322Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.496327417Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwcidmub-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496204861Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwcidmub-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.496193431Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwaqkfgo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49616033Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.496074446Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.496089623Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwaqkfgo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49610229Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.495879422Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wwaqkfgo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49609037Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=dfbf4ea84ce34563 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.495998902Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc08eb74078} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc08eb740c0} Threshold:{Var:Threshold Labels: Value:0xc08eb740c8} compare:{Var:compare Labels: Value:0xc08eb74040} sum:{Var:sum Labels: Value:0xc08eb74050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.495478932s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=0 ], [ var='Threshold' labels={} value=-10 ], [ var='compare' labels={} value=0 ], [ var='sum' labels={} value=0 ]}]" duration=219.365797ms + level=debug ts=2024-05-29T13:44:14.495782876Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.495724004Z level=debug msg="Setting 
next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wvv73glv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.495758816Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.495695716Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wvv73glv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.495685276Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wvttxedh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.495590325Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wvttxedh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.495565384Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wvpstpe7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.495458063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wvpstpe7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.495362752Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:14.49521014Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.054916ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv66hdxy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49510518Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv66hdxy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.495046519Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv66hdxy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.494986178Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv4vd7ye-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.494945678Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv4vd7ye-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.494809667Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.494719562Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv3rr97z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.494620035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv3rr97z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.494575234Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=178698 slug=avantpage t=2024-05-29T13:44:14.494476228Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=698103 slug=vericast t=2024-05-29T13:44:14.494326714Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv1mrmg2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.494285571Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv1mrmg2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49420339Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wv0x2j69-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49416945Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuz0cyyo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.493936698Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.493964426Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.493911686Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.49388389Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.493764494Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=9dd72c50ec6eff22 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.493745698Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.493445724s EvaluationString:}]" duration=166.911542ms + level=debug ts=2024-05-29T13:44:14.493752705Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuw83jkn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.493724606Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.4936209Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.49360043Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.493546026Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.493520963Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.493461074Z level=debug msg="Changing state" previous_state=Pending next_state=Normal previous_ends_at=2024-05-29T13:43:10Z next_ends_at=2024-05-29T13:44:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuvksqvt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.493490473Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.493419123Z level=debug msg="Changing state" previous_state=Pending next_state=Normal previous_ends_at=2024-05-29T13:43:10Z next_ends_at=2024-05-29T13:44:10Z + logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.493446622Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.493412333Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=679029 slug=joveoprodaws version=9238 fingerprint=7a18349ce852a92f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.493355601Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.492919223s EvaluationString:}]" duration=1.840067455s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wul1l7qy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.493369522Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wul1l7qy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.493322371Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wul1l7qy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.493294841Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.493258671Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.493199462Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=174016 slug=journalstaging instance="datasource_uid=bYQmLgyGz, ref_id=A" t=2024-05-29T13:44:14.493168027Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=174016 slug=journalstaging instance="datasource_uid=bYQmLgyGz, ref_id=A" t=2024-05-29T13:44:14.493155474Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=528849 slug=bitvavo t=2024-05-29T13:44:14.492449096Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=528849 slug=bitvavo instance= t=2024-05-29T13:44:14.492412186Z level=warn msg="Failed to take an image" dashboard=tzwet3P2a panel=2 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.scheduler user=174016 slug=journalstaging version=1 fingerprint=0906d9c226ecd96f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.49302919Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bYQmLgyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.492766975s EvaluationString:}]" duration=13.36013ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wujq8ddi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.493078529Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.492379745Z caller=remote_image_capturer.go:61 user=528849 slug=bitvavo rule_org_id=1 rule_uid=a5dc1727-ead5-4466-940e-ca8fcc13359d dashboard=tzwet3P2a panel=2 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wujq8ddi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492959338Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.492870636Z caller=remote_instance_store.go:51 user=172518 slug=harbourit msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.492840003Z caller=remote_instance_store.go:51 user=516847 slug=signit msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=172518 slug=harbourit instance= t=2024-05-29T13:44:14.492800249Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=172518 slug=harbourit instance= t=2024-05-29T13:44:14.492792386Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuj77nxr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492732755Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.492674697Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=516847 slug=signit version=18 fingerprint=ce98e3ac5ad1028e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.492605178Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=mysql_global_status_uptime, cluster=cloud, container=prometheus-mysql-exporter, endpoint=mysql-exporter, instance=10.244.0.6:9104, job=mysql-exporter-prometheus-mysql-exporter, namespace=grafanacloud, pod=mysql-exporter-prometheus-mysql-exporter-f57549f69-mwkf2, prometheus=grafanacloud/prometheus-kube-prometheus-prometheus, service=mysql-exporter-prometheus-mysql-exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mysql_global_status_uptime, cluster=cloud, container=prometheus-mysql-exporter, endpoint=mysql-exporter, instance=10.244.0.6:9104, job=mysql-exporter-prometheus-mysql-exporter, namespace=grafanacloud, pod=mysql-exporter-prometheus-mysql-exporter-f57549f69-mwkf2, prometheus=grafanacloud/prometheus-kube-prometheus-prometheus, service=mysql-exporter-prometheus-mysql-exporter Value:0xc03c9c5768} B:{Var:B Labels:__name__=mysql_global_status_uptime, cluster=cloud, container=prometheus-mysql-exporter, endpoint=mysql-exporter, instance=10.244.0.6:9104, job=mysql-exporter-prometheus-mysql-exporter, namespace=grafanacloud, pod=mysql-exporter-prometheus-mysql-exporter-f57549f69-mwkf2, prometheus=grafanacloud/prometheus-kube-prometheus-prometheus, service=mysql-exporter-prometheus-mysql-exporter Value:0xc03c9c5980} C:{Var:C Labels:__name__=mysql_global_status_uptime, cluster=cloud, container=prometheus-mysql-exporter, endpoint=mysql-exporter, instance=10.244.0.6:9104, job=mysql-exporter-prometheus-mysql-exporter, namespace=grafanacloud, pod=mysql-exporter-prometheus-mysql-exporter-f57549f69-mwkf2, prometheus=grafanacloud/prometheus-kube-prometheus-prometheus, service=mysql-exporter-prometheus-mysql-exporter Value:0xc03c9c5bb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.492109122s EvaluationString:[ var='A' labels={__name__=mysql_global_status_uptime, cluster=cloud, container=prometheus-mysql-exporter, endpoint=mysql-exporter, instance=10.244.0.6:9104, job=mysql-exporter-prometheus-mysql-exporter, namespace=grafanacloud, pod=mysql-exporter-prometheus-mysql-exporter-f57549f69-mwkf2, prometheus=grafanacloud/prometheus-kube-prometheus-prometheus, service=mysql-exporter-prometheus-mysql-exporter} value=145041 ], [ var='B' labels={__name__=mysql_global_status_uptime, cluster=cloud, container=prometheus-mysql-exporter, endpoint=mysql-exporter, instance=10.244.0.6:9104, job=mysql-exporter-prometheus-mysql-exporter, namespace=grafanacloud, pod=mysql-exporter-prometheus-mysql-exporter-f57549f69-mwkf2, prometheus=grafanacloud/prometheus-kube-prometheus-prometheus, service=mysql-exporter-prometheus-mysql-exporter} value=145041 ], [ var='C' labels={__name__=mysql_global_status_uptime, cluster=cloud, container=prometheus-mysql-exporter, endpoint=mysql-exporter, instance=10.244.0.6:9104, job=mysql-exporter-prometheus-mysql-exporter, namespace=grafanacloud, pod=mysql-exporter-prometheus-mysql-exporter-f57549f69-mwkf2, prometheus=grafanacloud/prometheus-kube-prometheus-prometheus, service=mysql-exporter-prometheus-mysql-exporter} value=0 ]}]" duration=13.53988ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuj77nxr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492670465Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuj77nxr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492646384Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuigkera-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492501383Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:14.492471672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:14.492460389Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=392320 slug=backyard t=2024-05-29T13:44:14.492413915Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:14.492408665Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wufzvm76-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492367412Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=321780 slug=jsavajols t=2024-05-29T13:44:14.492389673Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wufzvm76-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492333481Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wufzvm76-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492306761Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=392320 slug=backyard version=60 fingerprint=f15f340b4fd32220 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.492060423Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.491751732s EvaluationString:}]" duration=10.264965ms + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.492121868Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.562744ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuby1dtj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.492152789Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.492113045Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuaehg2i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491976478Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuaehg2i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491952757Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.49185391Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wuaehg2i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491889417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wua2b7w2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491783576Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu5ygev1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491684225Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu5ygev1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491611514Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu5ygev1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491597714Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu4kevjb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491503713Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu4kevjb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491416522Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu4kevjb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491358241Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu3j9w81-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491297821Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu3j9w81-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49120198Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wu3j9w81-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.491177939Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=dTFCYYk4k, ref_id=A" t=2024-05-29T13:44:14.4911625Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.49108003Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wts7kx06-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.490935847Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.490845045Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=656459 slug=activeport 
instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:14.490764416Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.scheduler user=112732 slug=gleamer t=2024-05-29T13:44:14.490663293Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=656459 slug=activeport t=2024-05-29T13:44:14.490719164Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=656459 slug=activeport version=76 fingerprint=61d3fd6046cde74d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.490635916Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.490296709s EvaluationString:}]" duration=16.260975ms + level=debug ts=2024-05-29T13:44:14.490698609Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:14.49067417Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.218769ms + level=debug ts=2024-05-29T13:44:14.490708379Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.490666005Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wts7kx06-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.490644974Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wtrez7xw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.490599103Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:14.490608592Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=20177 slug=paddledash instance= t=2024-05-29T13:44:14.490578428Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=20177 slug=paddledash t=2024-05-29T13:44:14.490514418Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wtrez7xw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.490489012Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wtmbix8s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.490349121Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.49022141Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wtmbix8s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.49029066Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wth6qzed-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.490164479Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=297794 slug=leanix t=2024-05-29T13:44:14.490097514Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wth6qzed-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.490122309Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wt0e9vdn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.489655014Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wt0e9vdn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.489626823Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wstbwndg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.489394181Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wstbwndg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.489355981Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.489188922Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsome2mx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.489146179Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.485826616Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.489041014Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsnvjmjy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.489055048Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsnvjmjy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.489026317Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsnvjmjy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.488960327Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=210775 slug=mfsac t=2024-05-29T13:44:14.488996354Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.044186ms + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wshhj406-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.488847426Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wshhj406-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.488756655Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.488749749Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.488612389Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.095822ms + level=debug ts=2024-05-29T13:44:14.488649448Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsdp2fqo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.488541792Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsdp2fqo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.488435391Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsb02wbf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.48835588Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wsb02wbf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.48827897Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.488257415Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrzj4xva-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.488127738Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrzj4xva-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487962516Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrqz8wsh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487711864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrqz8wsh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487642763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrqz8wsh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487614313Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrqm7xjq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487490562Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.487406006Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=543654 slug=jobcloudprogrammaticprod version=4 fingerprint=1515c76ff582fc54 attempt=1 
now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.487296958Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.487010783s EvaluationString:}]" duration=37.205991ms + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.487155123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.487135983Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.487116327Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.487194045Z caller=remote_instance_store.go:51 user=346766 slug=checklyhq msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=543660 slug=jobcloudprogrammaticstage version=1 fingerprint=52af5107848cf02e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.487003389Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.486520787s EvaluationString:}]" duration=16.32552ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrqcs5b1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487115848Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrka83m6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487072097Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=346766 slug=checklyhq instance= t=2024-05-29T13:44:14.487048073Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrka83m6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.487044597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=346766 slug=checklyhq t=2024-05-29T13:44:14.486998917Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.48698751Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrka83m6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486922156Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.486915515Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrjb5536-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486852725Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.486811084Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.486711952Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrjb5536-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486733504Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrjb5536-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486668813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=310637 slug=notino t=2024-05-29T13:44:14.486695956Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=117.88562ms + logger=ngalert.state.manager user=344017 slug=descript instance="resource.label.project_id=production-273614, resource.type=k8s_container" t=2024-05-29T13:44:14.486693371Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.486663969Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrj216k0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486630713Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist 
user=173730 slug=nikon t=2024-05-29T13:44:14.486593667Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.273651ms + level=debug ts=2024-05-29T13:44:14.486611564Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=344017 slug=descript version=3 fingerprint=c53fdd3a87d6b7f1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.486585367Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.project_id=production-273614, resource.type=k8s_container State:Normal Error: Results:map[] Values:map[Reduce:{Var:Reduce Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc032ce9668} Threshold:{Var:Threshold Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc032ce9640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.486244054s EvaluationString:[ var='Reduce' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0.11756074154687694 ], [ var='Threshold' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0 ]}]" duration=516.684527ms + level=debug ts=2024-05-29T13:44:14.486583181Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrj216k0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486566282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrj216k0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486541302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=316960 slug=mojamteam t=2024-05-29T13:44:14.486484219Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.35831ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrj216k0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486504791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wriux5gw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.48640808Z 
level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.486397755Z caller=remote_instance_store.go:51 user=344017 slug=descript msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.486320612Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=344017 slug=descript instance="datasource_uid=4JVxmaNVk, ref_id=query" t=2024-05-29T13:44:14.486341653Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrh6zq9g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486203938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrh6zq9g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486094687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrh6zq9g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.486065727Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrdbmc01-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.485928565Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.485832251Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wrdbmc01-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.485867305Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.485733547Z caller=remote_instance_store.go:51 user=697627 slug=haqq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wr81r0ea-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.485731133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wr81r0ea-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.485658123Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.485657353Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.767133ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wr6xep1o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.485593272Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wr5edcn6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.48537658Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wr4z06y6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.485121897Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqv3u2z6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.485005316Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.484922621Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.48476094Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.484755165Z caller=remote_instance_store.go:51 user=556147 slug=bettercloudholding msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.484587644Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqrb1k8x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484751713Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.484648784Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqrb1k8x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484689533Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqrb1k8x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484675532Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqlgh43e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484634632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqlgh43e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484503471Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.484443795Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.484437443Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqcadx32-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484308829Z 
level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.484211908Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqc5644z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484191477Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wqc5644z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.484077686Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node30 - 35.213.201.39, job=node-exporter, metrics_node_id=31, node_id=30" t=2024-05-29T13:44:14.484018451Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node30 - 35.213.201.39, job=node-exporter, metrics_node_id=31, node_id=30" t=2024-05-29T13:44:14.484005031Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wq8iumet-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483967565Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wq8iumet-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483942695Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wq8iumet-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483833864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wq3b2x5c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483762553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node28 - 35.213.132.28, job=node-exporter, metrics_node_id=29, node_id=28" t=2024-05-29T13:44:14.483727767Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.483660008Z caller=remote_instance_store.go:51 user=548276 slug=relayrobotics msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=548276 slug=relayrobotics t=2024-05-29T13:44:14.483613103Z level=debug msg="Saving alert states" count=5 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wq3b2x5c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483600301Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wq3b2x5c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483571341Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=548276 slug=relayrobotics instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.483590933Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpzijnzf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483523381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=548276 slug=relayrobotics instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.483559667Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=548276 slug=relayrobotics instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.483549995Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpzijnzf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.48348308Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=548276 slug=relayrobotics instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.48349644Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.483449154Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node26 - 35.214.1.71, job=node-exporter, metrics_node_id=27, node_id=26" t=2024-05-29T13:44:14.483415459Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpzijnzf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483372459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpzijnzf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483343399Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpzijnzf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483309658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node25 - 160.202.129.143, job=node-exporter, metrics_node_id=26, node_id=25" t=2024-05-29T13:44:14.483273791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.483238302Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpxy2dje-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483177697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpxy2dje-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483129216Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.483125761Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpw3ofsy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.483070776Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.483002233Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpw3ofsy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482942265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpw3ofsy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482930664Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node22 - 35.207.208.247, job=node-exporter, metrics_node_id=23, node_id=22" t=2024-05-29T13:44:14.482822279Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.482751878Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpqn7rij-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482741732Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482581195Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482577372Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.48268716Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482563985Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482538399Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpqjxigw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482684042Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482531695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node21 - 35.214.93.21, job=node-exporter, metrics_node_id=22, node_id=21" t=2024-05-29T13:44:14.482662043Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482513275Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpqjxigw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482611121Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482421409Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 
slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482413188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpqjxigw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482565451Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482395722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.184.213, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:14.482539367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpnbz0km-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.48249465Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.482446962Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.482499193Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482336028Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482329454Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpnbz0km-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482437249Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.48231887Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482307579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpnbz0km-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482379349Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.482402751Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.482393737Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node19 - 35.216.100.177, job=node-exporter, metrics_node_id=20, node_id=19" t=2024-05-29T13:44:14.482379433Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.482250497Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpj17koz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482242767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482104033Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpf77kt7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.482097026Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.482013483Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481987575Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.482027852Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:14.482055324Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.159455ms + level=debug ts=2024-05-29T13:44:14.482012355Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481944156Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:14.481988992Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.1163ms + level=debug ts=2024-05-29T13:44:14.481904818Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481873868Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node16 - 35.215.219.90, job=node-exporter, metrics_node_id=17, node_id=16" t=2024-05-29T13:44:14.481935906Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpch4fe6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481939344Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.481912777Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpch4fe6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481867503Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481852268Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481829812Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481793471Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node15 - 69.67.150.140, job=node-exporter, metrics_node_id=16, node_id=15" t=2024-05-29T13:44:14.481813387Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481765371Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpbbo2hp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481754202Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481738358Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481722736Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.481711848Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpbbo2hp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481718182Z level=debug msg="Keeping 
state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpbaw3j8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481617911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:14.481519054Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.225794ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpbaw3j8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.4815769Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.481479586Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wpbaw3j8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.48149877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wp7od53d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481437439Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.481391679Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wp7od53d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481393349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node12 - 35.207.109.141, job=node-exporter, metrics_node_id=13, node_id=12" t=2024-05-29T13:44:14.481340542Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-woyny2rl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481251827Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.481225289Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.481190251Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.481169384Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node11 - 35.212.175.54, job=node-exporter, metrics_node_id=12, node_id=11" t=2024-05-29T13:44:14.481190247Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-woyny2rl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481185146Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.481070392Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wosqz2tm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481075125Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wosqz2tm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.481000485Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.480981686Z caller=remote_instance_store.go:51 user=813270 slug=adiante msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node09 - 72.46.84.1, job=node-exporter, metrics_node_id=10, node_id=9" t=2024-05-29T13:44:14.480928352Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.480872602Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" + level=debug ts=2024-05-29T13:44:14.480825549Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.480856087Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wop5zwn2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.480724972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.48080708Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance= t=2024-05-29T13:44:14.480762183Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node08 - 35.215.25.82, job=node-exporter, metrics_node_id=9, node_id=8" t=2024-05-29T13:44:14.480775349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance= t=2024-05-29T13:44:14.48075228Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.480742488Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=performance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node07 - 35.213.161.121, job=node-exporter, metrics_node_id=8, node_id=7" t=2024-05-29T13:44:14.480631236Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-woblx8tv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.480465839Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-woblx8tv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.480432369Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.480333629Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-woaf1grl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.480260227Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-woaf1grl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.480234187Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-woaf1grl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.480196566Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-wny877ef-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.480022795Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 35.217.56.255, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:14.480202676Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wnxwmvih-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.479842353Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.480102155Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wnx8xcli-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47962011Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node04 - 35.213.80.116, job=node-exporter, metrics_node_id=5, node_id=4" t=2024-05-29T13:44:14.480050465Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wnlc5y8k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47957438Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.479985724Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.480029105Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.479920257Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wnja3cbu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.479389538Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.479983231Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.479852379Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wn9hfurf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.479265427Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.479936285Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node03 - 35.213.128.79, job=node-exporter, metrics_node_id=4, node_id=3" t=2024-05-29T13:44:14.479898449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node02 - 185.209.178.107, job=node-exporter, metrics_node_id=3, node_id=2" t=2024-05-29T13:44:14.479772187Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wn6mjd2i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.478871153Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=402122 slug=leapwallet t=2024-05-29T13:44:14.479615898Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.306176ms + level=debug ts=2024-05-29T13:44:14.479519757Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.479448425Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.479402581Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.478204997Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node00 - 35.209.23.215, job=node-exporter, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:14.479416707Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 
slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=integration-docker, instance=localhost:9100, instance_type=hedera-node, inventory_name=node06 - 34.125.43.221, job=node-exporter, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:14.479210566Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.478999749Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.478891814Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.478830721Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=integration-docker" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wn2t4yq8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47862964Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wn2t4yq8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47861427Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wn2t4yq8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47857858Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wn113hno-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.478482489Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmjtg8vr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.478273577Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=66104 slug=messagegears t=2024-05-29T13:44:14.478235345Z 
level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=66104 slug=messagegears instance= t=2024-05-29T13:44:14.478227624Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.47826081Z caller=remote_instance_store.go:51 user=66104 slug=messagegears msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmjtg8vr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.478228146Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.478200355Z caller=remote_instance_store.go:51 user=456850 slug=juniz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=66104 slug=messagegears t=2024-05-29T13:44:14.478193063Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmjtg8vr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.478158275Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:14.478215097Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=87780 slug=zencloudandhosting t=2024-05-29T13:44:14.478137265Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=66104 slug=messagegears version=1 fingerprint=08e5ed76523fb082 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.478139273Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.477913094s EvaluationString:}]" duration=60.678815ms + logger=ngalert.scheduler user=87780 slug=zencloudandhosting version=1 fingerprint=56c63047d5385d86 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.478034424Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.477726902s EvaluationString:}]" duration=105.891366ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmiu2y0d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.478092565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=504140 slug=chipotlestg t=2024-05-29T13:44:14.477868726Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + 
level=debug ts=2024-05-29T13:44:14.477970029Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.477840826Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmiu2y0d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.478008694Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=504140 slug=chipotlestg t=2024-05-29T13:44:14.477738823Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmgbutnl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477857842Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.477711222Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmgbutnl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477816082Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmgbutnl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477782092Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.47761384Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmbrymp8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477470528Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wmbrymp8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477432228Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=hNyufM-7z, ref_id=A" t=2024-05-29T13:44:14.477541758Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=hNyufM-7z, ref_id=A" t=2024-05-29T13:44:14.477499719Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=hNyufM-7z, ref_id=A" t=2024-05-29T13:44:14.477478058Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wm6lvk7n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477378077Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wm6lvk7n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477320327Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wm6lvk7n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477291407Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wm4cmjf2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477240156Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wm4cmjf2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477188456Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.477223718Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.477120463Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.111181ms
+ level=debug ts=2024-05-29T13:44:14.477117323Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlwzjok7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.477086554Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=35223 slug=silkroad instance= t=2024-05-29T13:44:14.477104753Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=35223 slug=silkroad t=2024-05-29T13:44:14.477050425Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlwz1mqz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476945003Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlwz1mqz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476909603Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlwz1mqz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476900163Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlwz1mqz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476847482Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlj5q7qw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476736521Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlco3dh1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47668253Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=419587 slug=greenpass instance= t=2024-05-29T13:44:14.476724168Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=419587 slug=greenpass t=2024-05-29T13:44:14.476690022Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wlco3dh1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47660946Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node30 - 35.213.160.82, job=node-exporter, metrics_node_id=31, node_id=30" t=2024-05-29T13:44:14.476686952Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wla4z4bt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476444898Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wl6e3ycu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476402757Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.476413625Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.476381037Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wl6e3ycu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476351117Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkysont5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476265106Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkwt6kvm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.476036634Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkqntckv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475877802Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node26 - 35.207.87.37, job=node-exporter, metrics_node_id=27, node_id=26" t=2024-05-29T13:44:14.475998328Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node25 - 35.213.131.196, job=node-exporter, metrics_node_id=26, node_id=25" t=2024-05-29T13:44:14.475853306Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkqnehvk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.4756865Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkqnehvk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47563454Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkqjzlgt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475597189Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=68499 slug=identt t=2024-05-29T13:44:14.47570443Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.496829ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkpjwbrc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475468108Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkpjwbrc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475417417Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkpjwbrc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475368477Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node23 - 34.0.0.36, job=node-exporter, metrics_node_id=24, node_id=23" t=2024-05-29T13:44:14.475501623Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-prom, ref_id=Query" t=2024-05-29T13:44:14.475324848Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-prom, ref_id=Query" t=2024-05-29T13:44:14.475315269Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wklwm8r1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475278556Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wklj8poe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475205495Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wklj8poe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475184445Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.475181648Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wklj8poe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475116514Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.475208472Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.475138122Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkkz6wi7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.475042714Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.475038931Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.128.216, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:14.475045982Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.128.216, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:14.475032062Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkkz6wi7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474990213Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkkz6wi7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474951333Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkaj8pzc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474895712Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.474801502Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.47484862Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkaj8pzc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474809691Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wkaj8pzc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474795991Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:14.474727571Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node18 - 35.210.61.131, job=node-exporter, metrics_node_id=19, node_id=18" t=2024-05-29T13:44:14.474707188Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wk6wm4su-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47467835Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wk6wm4su-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47465477Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.474630273Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.474592472Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node17 - 35.215.245.35, job=node-exporter, metrics_node_id=18, node_id=17" t=2024-05-29T13:44:14.474482828Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wk38326u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474459978Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.47444359Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node17 - 35.215.245.35, job=node-exporter, metrics_node_id=18, node_id=17" t=2024-05-29T13:44:14.474468037Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjz434ee-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474367837Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node16 - 35.214.36.62, job=node-exporter, metrics_node_id=17, node_id=16" t=2024-05-29T13:44:14.474335268Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node16 - 35.214.36.62, job=node-exporter, metrics_node_id=17, node_id=16" t=2024-05-29T13:44:14.474324031Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjwequ96-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474275206Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjwequ96-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474164594Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjvf52ko-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474102334Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.474151824Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node15 - 35.213.254.208, job=node-exporter, metrics_node_id=16, node_id=15" t=2024-05-29T13:44:14.474202954Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjvf52ko-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.474030513Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=beta-play-kong-external-rds-20240419105928418300000002" t=2024-05-29T13:44:14.474190313Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=beta-play-kong-external-rds-20240419105928418300000002" t=2024-05-29T13:44:14.474177995Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.474146472Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.47407514Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.474074507Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+ logger=ngalert.scheduler user=112387 slug=lucidhq version=1 fingerprint=a29f726715818bd1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.47399767Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.473713158s EvaluationString:}]" duration=70.516722ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjoff0cl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473858061Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node12 - 34.0.8.35, job=node-exporter, metrics_node_id=13, node_id=12" t=2024-05-29T13:44:14.473839783Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.472951717Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.468000462Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjoff0cl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473829971Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjoff0cl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473818781Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjma43yc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47376859Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjma43yc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47372376Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjma43yc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473671309Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node11 - 35.215.159.121, job=node-exporter, metrics_node_id=12, node_id=11" t=2024-05-29T13:44:14.473667712Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjkwsec1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473563448Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjkwsec1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473518118Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node10 - 35.213.99.36, job=node-exporter, metrics_node_id=11, node_id=10" t=2024-05-29T13:44:14.473513988Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjheu94u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473383656Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjheu94u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473362246Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node09 - 35.214.241.250, job=node-exporter, metrics_node_id=10, node_id=9" t=2024-05-29T13:44:14.47335291Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjh9qnow-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473330166Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.473265462Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjh9qnow-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473266645Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.473205272Z caller=remote_instance_store.go:51 user=438185 slug=nodeinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjd55wll-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473164484Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjd55wll-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473089763Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wjd55wll-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.473000033Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wj6cacqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472949662Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.473041736Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wj3915t1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47278656Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.472908305Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=81.836348ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wiy74eu3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47270761Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wiy74eu3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472671519Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.472937135Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 35.215.8.212, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:14.472702244Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wiwwfgo8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472526708Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-witwwuow-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472361326Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-witwwuow-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472348786Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=845543 slug=deliveryhero t=2024-05-29T13:44:14.472348161Z level=debug msg="Saving alert states" count=11 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2" t=2024-05-29T13:44:14.472338441Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.472347231Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2"
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.472286857Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.943292ms
+ logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2" t=2024-05-29T13:44:14.4722723Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wit6vgqj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472273215Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wit6vgqj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472221635Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node02 - 35.208.49.252, job=node-exporter, metrics_node_id=3, node_id=2" t=2024-05-29T13:44:14.472238779Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wisibduo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472130584Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=316960 slug=mojamteam t=2024-05-29T13:44:14.472122942Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2" t=2024-05-29T13:44:14.472119037Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.472120183Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2" t=2024-05-29T13:44:14.472076106Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316960 slug=mojamteam t=2024-05-29T13:44:14.472068172Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2" t=2024-05-29T13:44:14.472013674Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wisibduo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472066903Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wisibduo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.472018793Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=845543 slug=deliveryhero version=1 fingerprint=1de47b31a0834c18 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.47176337Z level=debug msg="Alert rule evaluated" results="[{Instance:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2 Value:0xc0329c28c0} B:{Var:B Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2 Value:0xc0329c2910} C:{Var:C Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2 Value:0xc0329c2950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.470965857s EvaluationString:[ var='A' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2} value=28 ], [ var='B' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2} value=28 ], [ var='C' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2a/e69b7998a9c0c446, region=eu-west-2} value=0 ]} {Instance:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, region=eu-west-2 Value:0xc0329c29e0} B:{Var:B Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, region=eu-west-2 Value:0xc0329c2a30} C:{Var:C Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, region=eu-west-2 Value:0xc0329c2a70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.470976637s EvaluationString:[ var='A' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, region=eu-west-2} value=44 ], [ var='B' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, region=eu-west-2} value=44 ], [ var='C' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2b/61fb74a2e9ff38ac, region=eu-west-2} value=0 ]} {Instance:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2 Value:0xc0329c2b00} B:{Var:B Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2 Value:0xc0329c2b50} C:{Var:C Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2 Value:0xc0329c2b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.470981518s EvaluationString:[ var='A' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2} value=28 ], [ var='B' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2} value=28 ], [ var='C' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-ext-eu-west-2c/aa17872cb310fc88, region=eu-west-2} value=0 ]} {Instance:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2 Value:0xc0329c2c60} B:{Var:B Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2 Value:0xc0329c2ca0} C:{Var:C Labels:account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2 Value:0xc0329c2c10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471004759s EvaluationString:[ var='A' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2} value=0 ], [ var='B' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2} value=0 ], [ var='C' labels={account_id=219247119227, dimension_LoadBalancer=app/wallet-prod-istio-eks/4411ce58c236cede, name=arn:aws:elasticloadbalancing:eu-west-2:219247119227:loadbalancer/app/wallet-prod-istio-eks/4411ce58c236cede, region=eu-west-2} value=0 ]} {Instance:account_id=457710302499, dimension_LoadBalancer=app/shared-prod-istio-main/5081e1c1528f5752, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/shared-prod-istio-main/5081e1c1528f5752, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_LoadBalancer=app/shared-prod-istio-main/5081e1c1528f5752, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/shared-prod-istio-main/5081e1c1528f5752, region=eu-west-2 Value:0xc0329c2d20} B:{Var:B Labels:account_id=457710302499, dimension_LoadBalancer=app/shared-prod-istio-main/5081e1c1528f5752, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/shared-prod-istio-main/5081e1c1528f5752, region=eu-west-2 Value:0xc0329c2d60} C:{Var:C Labels:account_id=457710302499, dimension_LoadBalancer=app/shared-prod-istio-main/5081e1c1528f5752, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/shared-prod-istio-main/5081e1c1528f5752, region=eu-west-2 Value:0xc0329c2db0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471009069s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_LoadBalancer=app/shared-prod-istio-main/5081e1c1528f5752, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/shared-prod-istio-main/5081e1c1528f5752, region=eu-west-2} value=14 ], [ var='B' labels={account_id=457710302499, dimension_LoadBalancer=app/shared-prod-istio-main/5081e1c1528f5752, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/shared-prod-istio-main/5081e1c1528f5752, region=eu-west-2} value=14 ], [ var='C' labels={account_id=457710302499, dimension_LoadBalancer=app/shared-prod-istio-main/5081e1c1528f5752, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/shared-prod-istio-main/5081e1c1528f5752, region=eu-west-2} value=0 ]} {Instance:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-fo/0f5606a3891cc620, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-fo/0f5606a3891cc620, region=eu-west-2 State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-fo/0f5606a3891cc620, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-fo/0f5606a3891cc620, region=eu-west-2 Value:0xc0329c2e90} B:{Var:B Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-fo/0f5606a3891cc620, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-fo/0f5606a3891cc620, region=eu-west-2 Value:0xc0329c2ed0} C:{Var:C Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-fo/0f5606a3891cc620, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-fo/0f5606a3891cc620, region=eu-west-2 Value:0xc0329c2e50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471013109s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-fo/0f5606a3891cc620, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-fo/0f5606a3891cc620, region=eu-west-2} value=1056 ], [ var='B' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-fo/0f5606a3891cc620, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-fo/0f5606a3891cc620, region=eu-west-2} value=1056 ], [ var='C' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-fo/0f5606a3891cc620, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-fo/0f5606a3891cc620, region=eu-west-2} value=1 ]} {Instance:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, region=eu-west-2 Value:0xc0329c2f50} B:{Var:B Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, region=eu-west-2 Value:0xc0329c2fb0} C:{Var:C Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, region=eu-west-2 Value:0xc0329c3000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471018989s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, region=eu-west-2} value=4 ], [ var='B' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, region=eu-west-2} value=4 ], [ var='C' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-fo/26a2a9b90496ad59, region=eu-west-2} value=0 ]} {Instance:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-main/0f3904258f6c8ab3,
name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, region=eu-west-2 Value:0xc0329c3150} C:{Var:C Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, region=eu-west-2 Value:0xc0329c31e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471027349s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, region=eu-west-2} value=0 ], [ var='B' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, region=eu-west-2} value=0 ], [ var='C' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-internal-main/0f3904258f6c8ab3, region=eu-west-2} value=0 ]} {Instance:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2 State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2 Value:0xc0329c3270} B:{Var:B Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2 Value:0xc0329c32d0} C:{Var:C Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2 Value:0xc0329c3320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471031519s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2} value=2044 ], [ var='B' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2} value=2044 ], [ var='C' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-istio-main/e2769b6bd5bfe6e2, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-istio-main/e2769b6bd5bfe6e2, region=eu-west-2} value=1 ]} {Instance:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-fo/8423d820672f5aca, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-fo/8423d820672f5aca, region=eu-west-2 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-fo/8423d820672f5aca, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-fo/8423d820672f5aca, region=eu-west-2 Value:0xc0329c33f0} B:{Var:B Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-fo/8423d820672f5aca, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-fo/8423d820672f5aca, region=eu-west-2 Value:0xc0329c3430} C:{Var:C Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-fo/8423d820672f5aca, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-fo/8423d820672f5aca, region=eu-west-2 Value:0xc0329c33a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471038418s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-fo/8423d820672f5aca, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-fo/8423d820672f5aca, region=eu-west-2} value=8 ], [ var='B' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-fo/8423d820672f5aca, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-fo/8423d820672f5aca, region=eu-west-2} value=8 ], [ var='C' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-fo/8423d820672f5aca, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-fo/8423d820672f5aca, region=eu-west-2} value=0 ]} {Instance:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2 Value:0xc0329c34d0} B:{Var:B Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2 Value:0xc0329c3510} C:{Var:C Labels:account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2 Value:0xc0329c3550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.471043448s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2} value=10 ], [ var='B' labels={account_id=457710302499, dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2} value=10 ], [ var='C' labels={account_id=457710302499, 
dimension_LoadBalancer=app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, name=arn:aws:elasticloadbalancing:eu-west-2:457710302499:loadbalancer/app/talabat-prod-wallet-pay-main/86cbdc1b31c9b528, region=eu-west-2} value=0 ]}]" duration=26.613662ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wioukojk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471967452Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wink57xr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471833431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wink57xr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47182196Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wink57xr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.47176978Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node06 - 35.208.138.144, job=node-exporter, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:14.471803935Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wiinnyvw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471663499Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 35.217.108.203, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:14.471692743Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.471588943Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.926533ms + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.471635158Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet1" + level=debug ts=2024-05-29T13:44:14.471507423Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wigxdk19-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471455877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wibe42mu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471384556Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.471490473Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet1" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wibe42mu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471360676Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.470497778Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.470297803Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472647 slug=planet instance="__name__=g4_task_instance_partitions_remaining_seconds, g4_cluster=g4c-sub-04" t=2024-05-29T13:44:14.47144252Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wibe42mu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471291585Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wi7sfxmu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471252825Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node03 - 35.212.205.112, job=node-exporter, metrics_node_id=4, node_id=3" t=2024-05-29T13:44:14.471317618Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.471177173Z caller=remote_instance_store.go:51 user=542095 slug=intelligencefusion msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.471103197Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wi5l5al1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.471072753Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-wi4lqb21-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470871031Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wi2xcbbo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.4708394Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.471062689Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.471081474Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.47097753Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.470941461Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.470813718Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_wmem_min, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node00 - 35.215.85.106, job=node-exporter, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:14.470808909Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.470746427Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.470699975Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.470608899Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.470498043Z level=debug msg="State manager processing evaluation results" resultCount=84 + logger=ngalert.state.manager user=206107 slug=hydrolix instance="code=499" t=2024-05-29T13:44:14.470615423Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:14.470560795Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wi2jvl1w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470560668Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-whzszk9a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470492297Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whzszk9a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470379286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whz2jxq4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470347225Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whz2jxq4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470292515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whx2vdtl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470182494Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whvn5454-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.470059862Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whu54qcq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469944001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-whu54qcq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469895361Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whu54qcq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.4698669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:14.469871109Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=zsAoBWS4z, ref_id=A" t=2024-05-29T13:44:14.469855441Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=183214 slug=vectorizedio version=24 fingerprint=ceebefb6a0a29c94 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.469744107Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=zsAoBWS4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.469480018s EvaluationString:}]" duration=74.987291ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whr2hx5l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469707719Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whf173gl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469679169Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whbulknf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469524017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whbulknf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469494707Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-whbulknf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469420206Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgxrfkao-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469352325Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgxrfkao-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469301785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgxrfkao-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469288475Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgud57d6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469238234Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=332534 slug=adevintakijiji t=2024-05-29T13:44:14.46921942Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=332534 slug=adevintakijiji instance="resource.label.database_id=ca-kijiji-production-up0f:ngdb-box-gcp, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database" t=2024-05-29T13:44:14.469201968Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgud57d6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469159873Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgud57d6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.469149923Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=332534 slug=adevintakijiji version=22 fingerprint=e440a280aff4fe39 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.468958987Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.database_id=ca-kijiji-production-up0f:ngdb-box-gcp, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resource.label.database_id=ca-kijiji-production-up0f:ngdb-box-gcp, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database Value:0xc06dca4170} C:{Var:C Labels:resource.label.database_id=ca-kijiji-production-up0f:ngdb-box-gcp, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database Value:0xc06dca4190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.467523766s EvaluationString:[ var='B' labels={resource.label.database_id=ca-kijiji-production-up0f:ngdb-box-gcp, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database} value=0.010651447882363147 ], [ var='C' labels={resource.label.database_id=ca-kijiji-production-up0f:ngdb-box-gcp, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database} value=0 ]}]" duration=200.683777ms + level=debug ts=2024-05-29T13:44:14.468989904Z caller=remote_instance_store.go:51 user=210775 slug=mfsac msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.468882082Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.468873169Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgipu0af-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468935921Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgipu0af-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468906521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.468836557Z level=debug 
msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgipu0af-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46888539Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.468856308Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.46880044Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="LoadBalancer=app/beta-backend-lb/44a8c2667a89a576, TargetGroup=targetgroup/beta-partner-service-tg/796e7c05bcf06e11" t=2024-05-29T13:44:14.468833812Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wghnln8i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468741279Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:14.468603522Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.468746293Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.468657842Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wghnln8i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468689078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wgf42rlo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468618648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=277807 slug=info96f8 instance="__name__=magento_mysql_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento" t=2024-05-29T13:44:14.468359835Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=404942 slug=twinstake instance="environment=monitoring-twinstake, group=polygon_mainnet, instance=polygon-exporter-mainnet-01, 
job=polygon_exporter, vm=polygon-validator-mainnet-02" t=2024-05-29T13:44:14.46854461Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=277807 slug=info96f8 instance="__name__=magento_mysql_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento" t=2024-05-29T13:44:14.468337084Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=277807 slug=info96f8 t=2024-05-29T13:44:14.46829636Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=swiss-sense" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfz5lji4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468315925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfz5lji4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468231874Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfv2iix9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468187073Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfv2iix9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468158903Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfv2iix9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468120913Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfv2iix9-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.468073402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:14.468032443Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:14.468013991Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.467933671Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.46780161Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfieie58-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.467656358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfieie58-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.467634718Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.467538256Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfieie58-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.467589687Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=797387 slug=roadrunnerdev t=2024-05-29T13:44:14.467324845Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.369473ms + level=debug ts=2024-05-29T13:44:14.467345225Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfgt1xb1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.467365125Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfcvx3y3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.467324454Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.467287353Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfbmsphr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.467160933Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfbmsphr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.467041301Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:14.466935431Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.521959ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wfb154av-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.466957781Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.466905482Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wf52860m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.466667588Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wev96uym-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.466524596Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.46643494Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.466285774Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:14.466150431Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.191497ms
+ level=debug ts=2024-05-29T13:44:14.466063714Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-weiequd1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.466010511Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.466003223Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g5.coreSlave*.usersessions.status.fifa-2023-xbsx.GaugeUS_aws-sin_Slave,5)) Query" t=2024-05-29T13:44:14.465964172Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-weh9kqwn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46595747Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.465901463Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-weh9kqwn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46593476Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-weh9kqwn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46589056Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-weh9kqwn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465854489Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.46588434Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.465856499Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.465843924Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-weh53lx3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465728598Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-weh53lx3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465646587Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.465644925Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.465652856Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wed7gsfp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465583776Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=190917 slug=d1cx instance= t=2024-05-29T13:44:14.46550636Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wed7gsfp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465534096Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wed7gsfp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465482085Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-web56qwf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465451925Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.465470636Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-web56qwf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465399804Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-web56qwf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465359294Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-we2veuh3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465229203Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-we2veuh3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465199972Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-we2veuh3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465179132Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdza386z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465151872Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.46511232Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.465067381Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdza386z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.465036021Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.464954281Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdt6chwg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46492147Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdhwsae6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464681597Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdhwsae6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464628086Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdfzl81a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464586106Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=620731 slug=masonite instance="resourceName=SLVAZPRDINFMDM" t=2024-05-29T13:44:14.464535362Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=620731 slug=masonite instance="resourceName=SLVAZPRDINFDQ" t=2024-05-29T13:44:14.464480801Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdcoo63i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464423284Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=620731 slug=masonite version=39 fingerprint=9f463ea4a7c6f937 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.464330269Z level=debug msg="Alert rule evaluated" results="[{Instance:resourceName=SLVAZPRDINFDQ State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:resourceName=SLVAZPRDINFDQ Value:0xc0164de4d8} B:{Var:B Labels:resourceName=SLVAZPRDINFDQ Value:0xc0164de4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.463778664s EvaluationString:[ var='A' labels={resourceName=SLVAZPRDINFDQ} value=1 ], [ var='B' labels={resourceName=SLVAZPRDINFDQ} value=0 ]} {Instance:resourceName=SLVAZPRDINFMDM State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:resourceName=SLVAZPRDINFMDM Value:0xc0164de548} B:{Var:B Labels:resourceName=SLVAZPRDINFMDM Value:0xc0164de5a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.463795504s EvaluationString:[ var='A' labels={resourceName=SLVAZPRDINFMDM} value=1 ], [ var='B' labels={resourceName=SLVAZPRDINFMDM} value=0 ]}]" duration=340.278115ms
+ level=debug ts=2024-05-29T13:44:14.464320175Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdcoo63i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464309223Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.463829328Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.463500821Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=612525 slug=adleyeview instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.464130331Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=612525 slug=adleyeview t=2024-05-29T13:44:14.46411353Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdawa6tg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464216942Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdawa6tg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464166432Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.464086395Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdai1ucs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.464083781Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wdai1ucs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46404479Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.463981274Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.463938088Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.463816508Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wd5qsxqu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.463739497Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wd55yc3l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.463668907Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="datasource_uid=ee613482-6425-4196-93d8-43d0b8d057e7, ref_id=A" t=2024-05-29T13:44:14.463463277Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.463545244Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="datasource_uid=ee613482-6425-4196-93d8-43d0b8d057e7, ref_id=A" t=2024-05-29T13:44:14.463450809Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="datasource_uid=ee613482-6425-4196-93d8-43d0b8d057e7, ref_id=A" t=2024-05-29T13:44:14.463435808Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=637816 slug=kingobservatory t=2024-05-29T13:44:14.463414212Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=313382 slug=hyai t=2024-05-29T13:44:14.463292163Z level=debug msg="Saving alert states done" count=9 max_state_save_concurrency=1 duration=115.044883ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wczdz0og-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.463370763Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcwbv37l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.463182511Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcwbv37l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.463152631Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.463270955Z caller=remote_alert_sender.go:94 user=851297 slug=roadrunneruat host=roadrunneruat-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.2.93.152:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdc585bitvhmoc alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcwbv37l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.463101841Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcwbv37l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46307689Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.463088277Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=trino-4h55" t=2024-05-29T13:44:14.463202819Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=spinnaker-g5s7" t=2024-05-29T13:44:14.463110364Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcvhs9bm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46304348Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.462911356Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.462921036Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=ops-001-ew4" t=2024-05-29T13:44:14.462902874Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcvhs9bm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462833618Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.462832884Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.33499ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcrmfq47-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462713967Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.462812497Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=kafka-global-b7iq" t=2024-05-29T13:44:14.462775225Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcrmfq47-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462671066Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcrmfq47-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462641066Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcrmfq47-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462630456Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcjv7uzp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462593055Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.462496991Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcjv7uzp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462469804Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc" t=2024-05-29T13:44:14.462433788Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=dmp-001-ew4" t=2024-05-29T13:44:14.462433533Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=dmp-001-ew4" t=2024-05-29T13:44:14.462421054Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-copier-tg/3ba547d4c30855a7" t=2024-05-29T13:44:14.462391001Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:14.462289753Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc" t=2024-05-29T13:44:14.462328481Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=172772 slug=ppbtradingtribe instance= t=2024-05-29T13:44:14.462276061Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcj0alwd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462298962Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=datascience-001-ew4" t=2024-05-29T13:44:14.462332268Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf" t=2024-05-29T13:44:14.46229186Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcj0alwd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.462285312Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=core-b8mf" t=2024-05-29T13:44:14.462252341Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-tec-uploader-cloud-manager/6a94e1b0259995f9" t=2024-05-29T13:44:14.462207733Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=320906 slug=techcyte t=2024-05-29T13:44:14.462167475Z level=debug msg="State manager processing evaluation results" resultCount=8
+ logger=ngalert.scheduler user=320906 slug=techcyte version=3 fingerprint=ac313a3a0d183427 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.461991146Z level=debug msg="Alert rule evaluated" results="[{Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-tec-uploader-cloud-manager/6a94e1b0259995f9 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-tec-uploader-cloud-manager/6a94e1b0259995f9 Value:0xc0064579b0} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-tec-uploader-cloud-manager/6a94e1b0259995f9 Value:0xc0064579b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.461355715s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-tec-uploader-cloud-manager/6a94e1b0259995f9} value=4.030584407198605 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-tec-uploader-cloud-manager/6a94e1b0259995f9} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2 Value:0xc006457ad0} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2 Value:0xc006457ad8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.461373079s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2} value=0.4586347609771088 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf Value:0xc006457bf0} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf Value:0xc006457bf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.461380796s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf} value=0.06547938894005453 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc Value:0xc006457d10} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc Value:0xc006457d18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.46138991s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc} value=0.944418289321895 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc} value=0 ]} {Instance:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2 Value:0xc006457e30} C:{Var:C Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2 Value:0xc006457e38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.461402709s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2} value=0.47435364120730583 ], [ var='C' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-backend-b/db3d0627f10c89a2} value=0 ]} {Instance:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-copier-tg/3ba547d4c30855a7 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-copier-tg/3ba547d4c30855a7 Value:0xc006457f60} C:{Var:C Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-copier-tg/3ba547d4c30855a7 Value:0xc006457f68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.46141198s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-copier-tg/3ba547d4c30855a7} value=0.003411897412989218 ], [ var='C' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-copier-tg/3ba547d4c30855a7} value=0 ]} {Instance:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc Value:0xc027910058} C:{Var:C Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc Value:0xc027910050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.46141854s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc} value=0.9146156295941774 ], [ var='C' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-ts/3edf6bf059aa33cc} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf Value:0xc027910120} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf Value:0xc027910128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.461427209s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf} value=0.07634832871230089 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/ci-techcyte-external/f67ea13da4834247, TargetGroup=targetgroup/ci-techcyte-cargo-service/8d2c8ec8987ec8bf} value=0 ]}]" duration=92.229999ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcarwq4s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46206767Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcarwq4s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46202496Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.462068229Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.461942637Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wcarwq4s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.461927769Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.461938902Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=453308 slug=hyperzodprod t=2024-05-29T13:44:14.461832249Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=ch-exchange-3u73" t=2024-05-29T13:44:14.461802236Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:14.461766943Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.352208ms
+ level=debug ts=2024-05-29T13:44:14.461723239Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=ch-ds-x6xf" t=2024-05-29T13:44:14.461703903Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wc429pih-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.461535145Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wc429pih-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.461506014Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.461548962Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wc429pih-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.461427273Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.46140279Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.461362231Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.461306315Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wc3ylxom-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.461297642Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wc3ylxom-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.461213121Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=350551 slug=loopme instance="cluster_name=ads-1fxg" t=2024-05-29T13:44:14.461221473Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wbv8aq35-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.46111609Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wbscnyi8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.460979529Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wbscnyi8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.460847998Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wblv3mpu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.460742546Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.46058872Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wbf8wyua-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.460548164Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.460437242Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.460088894Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wbdpzhkr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.460391513Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wbccijga-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.460253631Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.460215648Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.460150784Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.460130444Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.460165619Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wb6757t5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.460006189Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.46000987Z caller=remote_instance_store.go:51 user=544547 slug=tpe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=544547 slug=tpe t=2024-05-29T13:44:14.459969019Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wb3rbpy6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459911698Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wb3rbpy6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459886388Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotation ruleID=1578 dash= actualerror=strconv.ParseInt: parsing "": invalid syntax logger=ngalert.scheduler user=544547 slug=tpe version=22 fingerprint=19a6e1819346f995 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.459821741Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.459492401s EvaluationString:}]" duration=22.361148ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wb3rbpy6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459789027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wb27w9wi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459723926Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wb27w9wi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459672455Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wb27w9wi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459587965Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wax7su5y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459548854Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wax7su5y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459481863Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wax7su5y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459449883Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wax7su5y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459392023Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-warfabaq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.459351942Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.459331452Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.459350619Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.459305468Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.459300644Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:14.459204983Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-waj3g5mx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45917372Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-waj3g5mx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45911572Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.historian backend=loki user=163513 slug=dialpad t=2024-05-29T13:44:14.459026531Z level=debug msg="Done saving alert state history batch"
+ logger=ngalert.state.manager user=797387 slug=roadrunnerdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.458939711Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wahi96zm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.458918638Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa9cvyss-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.458701615Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa92gme2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.458512974Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa92gme2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.458426413Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=60603 slug=avalaratax t=2024-05-29T13:44:14.458348096Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=111.93556ms
+ level=debug ts=2024-05-29T13:44:14.458358888Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa43nzlp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45814337Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa43nzlp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.458112929Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.458048428Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:14.457979092Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.134486ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa43nzlp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457998638Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.45798742Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.457970745Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.457954929Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa0ynf2k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457823987Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa04wr1f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457738406Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa04wr1f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457717905Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-wa04wr1f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457688715Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9zv28pa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457640445Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9zv28pa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457617754Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.457662043Z caller=remote_alert_sender.go:94 user=364493 slug=csdepartment host=csdepartment-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.121.198:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=YfHEsSXnz alerts=1
+ level=debug ts=2024-05-29T13:44:14.457374784Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=364493 slug=csdepartment t=2024-05-29T13:44:14.457601028Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.883435ms
+ logger=ngalert.state.manager user=838012
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9zv28pa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457538514Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9zv28pa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457514513Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.457435534Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=531329 slug=xzhang1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9zg5rag-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457460353Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.457403733Z caller=ruler.go:522 msg="tenant is owned by this instance" user=531329 slug=xzhang1 groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9zg5rag-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457359042Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9mxkq06-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457329911Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.457185845Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.45714656Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9kk5v8m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457032858Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9kk5v8m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.457022898Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.457042438Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.456990602Z caller=remote_instance_store.go:51 user=438185 slug=nodeinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9h2thif-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456915907Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.456868108Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.456853866Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=48.85513ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w98zvis8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456846946Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w98zvis8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456826126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w98zvis8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456774796Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w98zvis8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456704125Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.456606007Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w97qq02v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456624514Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w97qq02v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456614704Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.456605846Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w97qq02v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456559274Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.456513952Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w9490nc5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456421232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w90epxm5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456308061Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w90epxm5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.456267801Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w8dlpuj7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45623899Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.456247728Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.45612672Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:14.456106919Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w88tseav-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.456087169Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.456012659Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w88tseav-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455995258Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=851297 slug=roadrunneruat t=2024-05-29T13:44:14.455933381Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=851297 slug=roadrunneruat instance= t=2024-05-29T13:44:14.455915981Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w83xsmt5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455957407Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=851297 slug=roadrunneruat instance= t=2024-05-29T13:44:14.455884249Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w83xsmt5-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455852066Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w83xsmt5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455831756Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w7x8b3in-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455751715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w7uqdftt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455654214Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.45567797Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w7uqdftt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455577693Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.455515288Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w7gp9gex-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455348471Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.455241768Z caller=remote_instance_store.go:51 user=68499 slug=identt msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w7gp9gex-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45524473Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w7cx28jt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45521444Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.455198334Z caller=remote_instance_store.go:51 user=87780 slug=zencloudandhosting msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=68499 slug=identt instance="datasource_uid=LG8GjSxMk, ref_id=A" t=2024-05-29T13:44:14.45518807Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w7cx28jt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.455096999Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w793l64n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.454962367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w76qy05b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.454899457Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.453844522Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=507549 slug=coindcx instance="datasource_uid=4DXtZk24z, ref_id=A" t=2024-05-29T13:44:14.454839707Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.45477874Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.454702782Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w76qy05b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.454680374Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w743pxbw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.454627564Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=174675 slug=journalprod instance="datasource_uid=uF2hBHyGz, ref_id=A" t=2024-05-29T13:44:14.454564454Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=174675 slug=journalprod instance="datasource_uid=uF2hBHyGz, ref_id=A" t=2024-05-29T13:44:14.454553835Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=174675 slug=journalprod version=1 fingerprint=3abd0a1a8e4b8946 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.454379608Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=uF2hBHyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.453876005s EvaluationString:}]" duration=42.450193ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w70nlibo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.454399231Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.454354797Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w70nlibo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.454348011Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:14.454297602Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager.persist user=473762 slug=intentiq t=2024-05-29T13:44:14.454286876Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6zh8ua3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.45430691Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=473762 slug=intentiq instance="instance=https://www.intentiq.com/, job=Intent IQ web site" t=2024-05-29T13:44:14.454263466Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6zh8ua3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45422962Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=473762 slug=intentiq t=2024-05-29T13:44:14.454127521Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=633335 slug=promqlworkshop t=2024-05-29T13:44:14.454144081Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.135792ms + logger=ngalert.scheduler user=473762 slug=intentiq version=30 fingerprint=1d93447746c53396 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.45404006Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=https://www.intentiq.com/, job=Intent IQ web site State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://www.intentiq.com/, job=Intent IQ web site Value:0xc020e74fa8} C:{Var:C Labels:instance=https://www.intentiq.com/, job=Intent IQ web site Value:0xc020e74fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.453643114s EvaluationString:[ var='A' labels={instance=https://www.intentiq.com/, job=Intent IQ web site} value=5.294953e+06 ], [ var='C' labels={instance=https://www.intentiq.com/, job=Intent IQ web site} value=0 ]}]" duration=17.227604ms + level=debug ts=2024-05-29T13:44:14.45414314Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.454057012Z caller=remote_instance_store.go:51 user=452115 slug=ybmetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=397201 slug=zultys t=2024-05-29T13:44:14.453947836Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=98.47968ms + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.453932294Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=46.893164ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6vpr20a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.453925267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6vpr20a-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.453882236Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.453763565Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6vpr20a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.453790095Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6vpr20a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.453776885Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6t9wdjr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.453736675Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.453629628Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.453531247Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.45324794Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.453465493Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:14.453410104Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.453384474Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.453353679Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6syogin-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.453361291Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w6n7uw79-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45325868Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:14.453078412Z level=debug msg="Saving alert states done" count=11 max_state_save_concurrency=1 duration=194.784472ms + level=debug ts=2024-05-29T13:44:14.453059644Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w5z1z4ye-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452934146Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:14.452864771Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w5z1z4ye-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452892296Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.452814325Z level=debug msg="Saving alert states done" count=170 max_state_save_concurrency=1 duration=3.535060271s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w5wvqmtp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452819915Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:14.452709162Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w5wvqmtp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452711054Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:14.452696936Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=60199 slug=wallapop t=2024-05-29T13:44:14.452677804Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.452573102Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w5sogeys-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452565063Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.45243158Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w5sflpxl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452383471Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w5nem8h9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452227299Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w54zzxqf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.452067777Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.45202213Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w4xz9agv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.451824935Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.45173651Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.451653091Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=884866 slug=cnonumerique version=81 fingerprint=a35b31e9d7546287 attempt=1 now=2024-05-29T13:44:10Z 
t=2024-05-29T13:44:14.451516217Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddhkbrfewv7k0d, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.451160519s EvaluationString:}]" duration=22.829336ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w4v5aa9w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.451640143Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w4v5aa9w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.451603673Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w4u0d0sb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.451465791Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=166137 slug=teletracking t=2024-05-29T13:44:14.451332127Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=166137 slug=teletracking t=2024-05-29T13:44:14.451315303Z level=info msg="Detected stale state entry" cacheID="[[\"Series\",\"query67bfcee2a3be4ccb84570e5b93b8f55b\"],[\"__alert_rule_namespace_uid__\",\"ujUZjBd7z\"],[\"__alert_rule_uid__\",\"58029cc6-f2bf-45ea-8ab2-ba18f918b14b\"],[\"alertname\",\"Alert-AmazonMQ-sinaihealthtest-gateway-receive-queue-high-message-count\"],[\"grafana_folder\",\"Access - Prod\"],[\"service\",\"premise-gateway\"],[\"serviceline\",\"managed-services\"],[\"severity\",\"P2\"]]" state=Normal reason= + level=debug ts=2024-05-29T13:44:14.45134632Z caller=remote_instance_store.go:57 user=166137 slug=teletracking msg="calling DeleteAlertInstances - not implemented" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w4q7fpw6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.451284789Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.451208567Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:14.451190851Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w4q7fpw6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.451184938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w4q7fpw6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.451114258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=155740 slug=routific version=3 fingerprint=57eae8f165f963b4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.451080004Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.450816835s EvaluationString:}]" duration=49.675993ms + level=debug ts=2024-05-29T13:44:14.451031622Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.450905183Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112732 slug=gleamer instance="appId=SERGAS01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=SERGAS01" t=2024-05-29T13:44:14.450800297Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:14.450754518Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w492bsyo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.450704154Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.450609438Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w48e9s1f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.450545972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-w48e9s1f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.450519092Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=60603 slug=avalaratax t=2024-05-29T13:44:14.45048819Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.45039234Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60603 slug=avalaratax t=2024-05-29T13:44:14.450404889Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w44j9tw1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.450415781Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=60603 slug=avalaratax version=2 fingerprint=a620e7de91c8bc14 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.450331754Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.450083018s EvaluationString:}]" duration=54.405007ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w44j9tw1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.45034223Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w44j9tw1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.450255999Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w3qj3i5m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.450146268Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-w3qj3i5m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
[... thousands of further machine-generated ngalert debug lines in this fixture, all at t=2024-05-29T13:44:14: state-manager "Setting next state"/"Keeping state" transitions for kube_persistentvolume_status_phase series (user=838012 slug=lepton) and other tenants, remote_instance_store.go:51 "calling SaveAlertInstance" calls, "Saving alert states"/"Saving alert states done" persistence summaries, and scheduler "Alert rule evaluated" results ...]
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439236372s EvaluationString:[ var='B' labels={ServiceName=auth-api} value=16.367299548039835 ], [ var='C' labels={ServiceName=auth-api} value=0 ]} {Instance:ServiceName=content-api State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=content-api Value:0xc006910d50} C:{Var:C Labels:ServiceName=content-api Value:0xc006910cf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43924102s EvaluationString:[ var='B' labels={ServiceName=content-api} value=0.21383562125265598 ], [ var='C' labels={ServiceName=content-api} value=0 ]} {Instance:ServiceName=incident-api State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=incident-api Value:0xc006910db0} C:{Var:C Labels:ServiceName=incident-api Value:0xc006910db8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439246161s EvaluationString:[ var='B' labels={ServiceName=incident-api} value=3.143010247912672 ], [ var='C' labels={ServiceName=incident-api} value=0 ]} {Instance:ServiceName=incident-api-background State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=incident-api-background Value:0xc006910e18} C:{Var:C Labels:ServiceName=incident-api-background Value:0xc006910e40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439251011s EvaluationString:[ var='B' labels={ServiceName=incident-api-background} value=9.870855687186124 ], [ var='C' labels={ServiceName=incident-api-background} value=0 ]} {Instance:ServiceName=incident-api-incidents-worker State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=incident-api-incidents-worker Value:0xc006910e70} C:{Var:C Labels:ServiceName=incident-api-incidents-worker Value:0xc006910e78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43926021s EvaluationString:[ var='B' labels={ServiceName=incident-api-incidents-worker} value=0.6078287387887638 ], [ var='C' labels={ServiceName=incident-api-incidents-worker} value=0 ]} {Instance:ServiceName=incident-api-notifications-worker State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=incident-api-notifications-worker Value:0xc006910eb8} C:{Var:C Labels:ServiceName=incident-api-notifications-worker Value:0xc006910ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439265625s EvaluationString:[ var='B' labels={ServiceName=incident-api-notifications-worker} value=5.747903360364337 ], [ var='C' labels={ServiceName=incident-api-notifications-worker} value=0 ]} {Instance:ServiceName=incident-api-projection-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=incident-api-projection-1 Value:0xc006910f48} C:{Var:C Labels:ServiceName=incident-api-projection-1 Value:0xc006910f40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43927387s EvaluationString:[ var='B' labels={ServiceName=incident-api-projection-1} value=29.65020994345347 ], [ var='C' labels={ServiceName=incident-api-projection-1} value=0 ]} {Instance:ServiceName=incident-api-projection-2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=incident-api-projection-2 Value:0xc006910fb0} C:{Var:C Labels:ServiceName=incident-api-projection-2 Value:0xc006910f78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439279318s EvaluationString:[ var='B' labels={ServiceName=incident-api-projection-2} value=13.909638404846191 ], [ var='C' labels={ServiceName=incident-api-projection-2} value=0 ]} 
{Instance:ServiceName=incident-api-projection-3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=incident-api-projection-3 Value:0xc006911000} C:{Var:C Labels:ServiceName=incident-api-projection-3 Value:0xc006911008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439283342s EvaluationString:[ var='B' labels={ServiceName=incident-api-projection-3} value=10.349967360496521 ], [ var='C' labels={ServiceName=incident-api-projection-3} value=0 ]} {Instance:ServiceName=news-api State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=news-api Value:0xc006911078} C:{Var:C Labels:ServiceName=news-api Value:0xc0069110a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439289002s EvaluationString:[ var='B' labels={ServiceName=news-api} value=0.5786376845919424 ], [ var='C' labels={ServiceName=news-api} value=0 ]} {Instance:ServiceName=note-api State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=note-api Value:0xc0069110f8} C:{Var:C Labels:ServiceName=note-api Value:0xc006911128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439292981s EvaluationString:[ var='B' labels={ServiceName=note-api} value=0.18942615824441114 ], [ var='C' labels={ServiceName=note-api} value=0 ]} {Instance:ServiceName=notification-service State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ServiceName=notification-service Value:0xc006911158} C:{Var:C Labels:ServiceName=notification-service Value:0xc0069111b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.439297657s EvaluationString:[ var='B' labels={ServiceName=notification-service} value=0.2915002815425396 ], [ var='C' labels={ServiceName=notification-service} value=0 ]}]" duration=32.670266ms + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.440176265Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=107.771171ms + level=debug ts=2024-05-29T13:44:14.439854133Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.439786878Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.439762796Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.439665665Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:14.439447046Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=237629 slug=ocrolus t=2024-05-29T13:44:14.439356197Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=735588 slug=srepradnya instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.438713767Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=735588 slug=srepradnya t=2024-05-29T13:44:14.438690367Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=735588 slug=srepradnya version=5 fingerprint=cc2b923435d7fde5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.438607085Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A 
State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43833683s EvaluationString:}]" duration=7.269524ms + level=debug ts=2024-05-29T13:44:14.438489217Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vwskghqv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.438356216Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vwskghqv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.438344446Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vwfx3i5c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.438117504Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vwfx3i5c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.438074984Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.438022942Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vw62bb9f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437967642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vw62bb9f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437952182Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vw62bb9f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437924312Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.437965915Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vw62bb9f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437912582Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vw62bb9f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437843931Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:14.437894251Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.717281ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvrleg9d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437671409Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvrleg9d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437658129Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=438185 slug=nodeinfra instance="chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless" t=2024-05-29T13:44:14.437647179Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=438185 slug=nodeinfra instance="chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, 
servicetype=headless" t=2024-05-29T13:44:14.437546239Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="host_short=ibot-pro-awsoh05" t=2024-05-29T13:44:14.43756925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvn6b6kw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437522298Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.437454787Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=438185 slug=nodeinfra instance="chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless" t=2024-05-29T13:44:14.437532423Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvn6b6kw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437509708Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206439 slug=relaypro instance="host_short=ibot-pro-awsoh02" t=2024-05-29T13:44:14.437476388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="host_short=ibot-pro-awsoh02" t=2024-05-29T13:44:14.437466435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvn6b6kw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437414797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro instance="host_short=ibot-pro-awsoh01" t=2024-05-29T13:44:14.437436703Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=206439 slug=relaypro version=5 fingerprint=9279abfef89d91e2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.437251762Z level=debug msg="Alert rule evaluated" results="[{Instance:host_short=ibot-pro-awsoh01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host_short=ibot-pro-awsoh01 Value:0xc03db19cf0} C:{Var:C Labels:host_short=ibot-pro-awsoh01 Value:0xc03db19cf8} D:{Var:D Labels:host_short=ibot-pro-awsoh01 Value:0xc03db19de0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436807974s EvaluationString:[ var='A' labels={host_short=ibot-pro-awsoh01} value=5 ], [ var='C' 
labels={host_short=ibot-pro-awsoh01} value=5 ], [ var='D' labels={host_short=ibot-pro-awsoh01} value=0 ]} {Instance:host_short=ibot-pro-awsoh02 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host_short=ibot-pro-awsoh02 Value:0xc03db19e50} C:{Var:C Labels:host_short=ibot-pro-awsoh02 Value:0xc03db19e58} D:{Var:D Labels:host_short=ibot-pro-awsoh02 Value:0xc03db19f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436824689s EvaluationString:[ var='A' labels={host_short=ibot-pro-awsoh02} value=5 ], [ var='C' labels={host_short=ibot-pro-awsoh02} value=5 ], [ var='D' labels={host_short=ibot-pro-awsoh02} value=0 ]} {Instance:host_short=ibot-pro-awsoh03 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host_short=ibot-pro-awsoh03 Value:0xc01f7cc030} C:{Var:C Labels:host_short=ibot-pro-awsoh03 Value:0xc03db19fd0} D:{Var:D Labels:host_short=ibot-pro-awsoh03 Value:0xc03db19fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436832502s EvaluationString:[ var='A' labels={host_short=ibot-pro-awsoh03} value=5 ], [ var='C' labels={host_short=ibot-pro-awsoh03} value=5 ], [ var='D' labels={host_short=ibot-pro-awsoh03} value=0 ]} {Instance:host_short=ibot-pro-awsoh04 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host_short=ibot-pro-awsoh04 Value:0xc01f7cc090} C:{Var:C Labels:host_short=ibot-pro-awsoh04 Value:0xc01f7cc098} D:{Var:D Labels:host_short=ibot-pro-awsoh04 Value:0xc01f7cc0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436841871s EvaluationString:[ var='A' labels={host_short=ibot-pro-awsoh04} value=5 ], [ var='C' labels={host_short=ibot-pro-awsoh04} value=5 ], [ var='D' labels={host_short=ibot-pro-awsoh04} value=0 ]} {Instance:host_short=ibot-pro-awsoh05 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host_short=ibot-pro-awsoh05 Value:0xc01f7cc130} C:{Var:C Labels:host_short=ibot-pro-awsoh05 Value:0xc01f7cc138} D:{Var:D Labels:host_short=ibot-pro-awsoh05 Value:0xc01f7cc180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436848056s EvaluationString:[ var='A' labels={host_short=ibot-pro-awsoh05} value=5 ], [ var='C' labels={host_short=ibot-pro-awsoh05} value=5 ], [ var='D' labels={host_short=ibot-pro-awsoh05} value=0 ]}]" duration=15.125949ms + level=debug ts=2024-05-29T13:44:14.437340631Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.437308033Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvltzk30-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437301936Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvltzk30-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437235895Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.scheduler user=438185 slug=nodeinfra version=209 fingerprint=b3c4419520ad4eab attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.436988793Z level=debug msg="Alert rule evaluated" results="[{Instance:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=ash-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_ash_1, purpose=custom_metric, region=ashburn, servicetype=headless State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=ash-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_ash_1, purpose=custom_metric, region=ashburn, servicetype=headless Value:0xc01e63c290} B:{Var:B Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=ash-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_ash_1, purpose=custom_metric, region=ashburn, servicetype=headless Value:0xc01e63c360} C:{Var:C Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=ash-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_ash_1, purpose=custom_metric, region=ashburn, servicetype=headless Value:0xc01e63c410}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436111163s EvaluationString:[ var='A' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=ash-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_ash_1, purpose=custom_metric, region=ashburn, servicetype=headless} value=0 ], [ var='B' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=ash-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_ash_1, purpose=custom_metric, region=ashburn, servicetype=headless} value=0 ], [ var='C' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=ash-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_ash_1, purpose=custom_metric, region=ashburn, servicetype=headless} value=0 ]} {Instance:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless Value:0xc01e63c580} B:{Var:B Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless Value:0xc01e63c638} C:{Var:C Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless Value:0xc01e63c6e8}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436132373s EvaluationString:[ var='A' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless} value=0 ], [ var='B' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless} value=0 ], [ var='C' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=backup-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_backup_phx_1, purpose=custom_metric, region=seoul, servicetype=headless} value=0 ]} {Instance:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless Value:0xc01e63c900} B:{Var:B Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless Value:0xc01e63c9b8} C:{Var:C Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless Value:0xc01e63ca68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.4361464s EvaluationString:[ var='A' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless} value=0 ], [ var='B' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless} value=0 ], [ var='C' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=nld-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_nld_1, purpose=custom_metric, region=netherland, servicetype=headless} value=0 ]} {Instance:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=phx-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_phx_1, purpose=custom_metric, region=phoenix, servicetype=headless State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=phx-nc-1.ninodes.com, 
node_name=prod_NCG_mainnet_headless_phx_1, purpose=custom_metric, region=phoenix, servicetype=headless Value:0xc01e63cc88} B:{Var:B Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=phx-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_phx_1, purpose=custom_metric, region=phoenix, servicetype=headless Value:0xc01e63cd40} C:{Var:C Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=phx-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_phx_1, purpose=custom_metric, region=phoenix, servicetype=headless Value:0xc01e63cbc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436164849s EvaluationString:[ var='A' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=phx-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_phx_1, purpose=custom_metric, region=phoenix, servicetype=headless} value=0 ], [ var='B' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=phx-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_phx_1, purpose=custom_metric, region=phoenix, servicetype=headless} value=0 ], [ var='C' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=phx-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_phx_1, purpose=custom_metric, region=phoenix, servicetype=headless} value=0 ]} {Instance:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_1, purpose=custom_metric, region=singapore, servicetype=headless State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_1, purpose=custom_metric, region=singapore, servicetype=headless Value:0xc01e63cfa0} B:{Var:B Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_1, purpose=custom_metric, region=singapore, servicetype=headless Value:0xc01e63d058} C:{Var:C Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_1, purpose=custom_metric, region=singapore, servicetype=headless Value:0xc01e63d110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43617967s EvaluationString:[ var='A' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_1, purpose=custom_metric, region=singapore, servicetype=headless} value=0 ], [ var='B' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_1, purpose=custom_metric, region=singapore, servicetype=headless} value=0 ], [ var='C' labels={chain=NCG, cloud=pheonixNap, deployment=production, 
instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-1.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_1, purpose=custom_metric, region=singapore, servicetype=headless} value=0 ]} {Instance:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-2.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_2, purpose=custom_metric, region=singapore, servicetype=headless State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-2.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_2, purpose=custom_metric, region=singapore, servicetype=headless Value:0xc01e63d278} B:{Var:B Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-2.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_2, purpose=custom_metric, region=singapore, servicetype=headless Value:0xc01e63d330} C:{Var:C Labels:chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-2.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_2, purpose=custom_metric, region=singapore, servicetype=headless Value:0xc01e63d438}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.436193952s EvaluationString:[ var='A' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-2.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_2, purpose=custom_metric, region=singapore, servicetype=headless} value=0 ], [ var='B' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-2.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_2, purpose=custom_metric, region=singapore, servicetype=headless} value=0 ], [ var='C' labels={chain=NCG, cloud=pheonixNap, deployment=production, instance=13.124.239.97:8380, job=prod-customMetric, network=mainnet, node=sgp-nc-2.ninodes.com, node_name=prod_NCG_mainnet_headless_sgp_2, purpose=custom_metric, region=singapore, servicetype=headless} value=0 ]}]" duration=41.81494ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvkbl4xo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.437140874Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.437032451Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvkbl4xo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436995692Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.43687365Z caller=remote_rule_evaluator.go:193 user=473762 slug=intentiq msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + level=debug ts=2024-05-29T13:44:14.436952321Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=qe-parcel-ranges-ingest" t=2024-05-29T13:44:14.436886187Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvdm7gmb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436861091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vvdm7gmb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436823731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-sales-contacts-consumer" t=2024-05-29T13:44:14.436857192Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-parcel-ranges-backfill" t=2024-05-29T13:44:14.436843238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-customers-backfill" t=2024-05-29T13:44:14.436822316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-customer-addresses-backfill" t=2024-05-29T13:44:14.436746326Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-courier-tours-backfill" t=2024-05-29T13:44:14.436708731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-stable-collection-template-backfill" t=2024-05-29T13:44:14.436616511Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-stable-collection-block-backfill" t=2024-05-29T13:44:14.43658709Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-stable-collection-block-backfill" t=2024-05-29T13:44:14.436576162Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.436586128Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-routes-backfill" t=2024-05-29T13:44:14.436538638Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=147497 slug=rhodev t=2024-05-29T13:44:14.43654978Z level=debug msg="Saving alert states" 
count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-customer-contacts-backfill" t=2024-05-29T13:44:14.436449714Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-customer-contact-relations-backfill" t=2024-05-29T13:44:14.436404144Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vv67vngc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436356566Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vv67vngc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436327326Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vv67vngc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436305295Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.436306755Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=mp-dx-parcel-events-backfill" t=2024-05-29T13:44:14.436278731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vurm55un-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436203664Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-receipt" t=2024-05-29T13:44:14.436158053Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-parcel-events" t=2024-05-29T13:44:14.4361141Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-long-term-redirections" t=2024-05-29T13:44:14.436094838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-long-term-redirections" t=2024-05-29T13:44:14.436082884Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:14.436073105Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=228733 slug=csmoney t=2024-05-29T13:44:14.435873334Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.435747643Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-collections" t=2024-05-29T13:44:14.436020364Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vurm55un-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436098563Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-collection-labels" t=2024-05-29T13:44:14.435981809Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vurm55un-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.436037303Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.436102988Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.436039159Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.436042393Z caller=remote_instance_store.go:51 user=633335 slug=promqlworkshop msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-queued-tasks" t=2024-05-29T13:44:14.43588503Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.435934745Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vupqi6qq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.435989162Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vupqi6qq-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.435890221Z level=debug msg="Setting next state" handler=resultNormal
+Error parsing panelUID for alert annotation ruleID=1620 dash= actual= error=strconv.ParseInt: parsing "": invalid syntax
+logger=ngalert.state.manager.persist user=304032 slug=clearbanc t=2024-05-29T13:44:14.435863175Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.286013ms
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-customers-backfill" t=2024-05-29T13:44:14.435804675Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vupqi6qq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.435846251Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-customer-addresses-backfill" t=2024-05-29T13:44:14.435785005Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-cust-cod-pref-backfill" t=2024-05-29T13:44:14.435736056Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=bob" t=2024-05-29T13:44:14.435673375Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=bob" t=2024-05-29T13:44:14.435655269Z level=debug msg="Setting next state" handler=resultAlerting
+level=debug ts=2024-05-29T13:44:14.435718783Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.435690633Z caller=remote_image_capturer.go:33 user=475799 slug=dpdcz rule_org_id=1 rule_uid=adg97z7k9wirkc msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+logger=ngalert.scheduler user=475799 slug=dpdcz version=64 fingerprint=b42d035198a271c1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.435283609Z level=debug msg="Alert rule evaluated" results="[{Instance:consumer_name=bob State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=bob Value:0xc0b5803540} B:{Var:B Labels:consumer_name=bob Value:0xc0b5803350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434077977s EvaluationString:[ var='A' labels={consumer_name=bob} value=8.712994202185784e+06 ], [ var='B' labels={consumer_name=bob} value=1 ]} {Instance:consumer_name=fido-dx-cust-cod-pref-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=fido-dx-cust-cod-pref-backfill Value:0xc0b5803740} B:{Var:B Labels:consumer_name=fido-dx-cust-cod-pref-backfill Value:0xc0b5803748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434097308s EvaluationString:[ var='A' labels={consumer_name=fido-dx-cust-cod-pref-backfill} value=0 ], [ var='B' labels={consumer_name=fido-dx-cust-cod-pref-backfill} value=0 ]} {Instance:consumer_name=fido-dx-customer-addresses-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=fido-dx-customer-addresses-backfill Value:0xc0b58037a8} B:{Var:B Labels:consumer_name=fido-dx-customer-addresses-backfill Value:0xc0b5803af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434105739s EvaluationString:[ var='A' labels={consumer_name=fido-dx-customer-addresses-backfill} value=0 ], [ var='B' labels={consumer_name=fido-dx-customer-addresses-backfill} value=0 ]} {Instance:consumer_name=fido-dx-customers-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=fido-dx-customers-backfill Value:0xc0b5803ef0} B:{Var:B Labels:consumer_name=fido-dx-customers-backfill Value:0xc0b5803ef8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434112752s EvaluationString:[ var='A' labels={consumer_name=fido-dx-customers-backfill} value=0 ], [ var='B' labels={consumer_name=fido-dx-customers-backfill} value=0 ]} {Instance:consumer_name=fido-dx-payment-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=fido-dx-payment-backfill Value:0xc0ba18a048} B:{Var:B Labels:consumer_name=fido-dx-payment-backfill Value:0xc0ba18a090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434119439s EvaluationString:[ var='A' labels={consumer_name=fido-dx-payment-backfill} value=2.7431693989071033 ], [ var='B' labels={consumer_name=fido-dx-payment-backfill} value=0 ]} {Instance:consumer_name=fido-dx-queued-tasks State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=fido-dx-queued-tasks Value:0xc0ba18a0c0} B:{Var:B Labels:consumer_name=fido-dx-queued-tasks Value:0xc0ba18a0c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43412583s EvaluationString:[ var='A' labels={consumer_name=fido-dx-queued-tasks} value=0 ], [ var='B' labels={consumer_name=fido-dx-queued-tasks} value=0 ]} {Instance:consumer_name=fido-dx-work-days-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=fido-dx-work-days-backfill Value:0xc0ba18a120} B:{Var:B Labels:consumer_name=fido-dx-work-days-backfill Value:0xc0ba18a0f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434130706s EvaluationString:[ var='A' labels={consumer_name=fido-dx-work-days-backfill} value=0 ], [ var='B' labels={consumer_name=fido-dx-work-days-backfill} value=0 ]} {Instance:consumer_name=il-ingest-collection-labels State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-collection-labels Value:0xc0ba18a160} B:{Var:B Labels:consumer_name=il-ingest-collection-labels Value:0xc0ba18a168}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434137851s EvaluationString:[ var='A' labels={consumer_name=il-ingest-collection-labels} value=0 ], [ var='B' labels={consumer_name=il-ingest-collection-labels} value=0 ]} {Instance:consumer_name=il-ingest-collections State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-collections Value:0xc0ba18a198} B:{Var:B Labels:consumer_name=il-ingest-collections Value:0xc0ba18a1c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434142649s EvaluationString:[ var='A' labels={consumer_name=il-ingest-collections} value=0 ], [ var='B' labels={consumer_name=il-ingest-collections} value=0 ]} {Instance:consumer_name=il-ingest-invoice State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-invoice Value:0xc0ba18a1f0} B:{Var:B Labels:consumer_name=il-ingest-invoice Value:0xc0ba18a1f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434148423s EvaluationString:[ var='A' labels={consumer_name=il-ingest-invoice} value=0 ], [ var='B' labels={consumer_name=il-ingest-invoice} value=0 ]} {Instance:consumer_name=il-ingest-long-term-redirections State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-long-term-redirections Value:0xc0ba18a228} B:{Var:B Labels:consumer_name=il-ingest-long-term-redirections Value:0xc0ba18a250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434170857s EvaluationString:[ var='A' labels={consumer_name=il-ingest-long-term-redirections} value=0 ], [ var='B' labels={consumer_name=il-ingest-long-term-redirections} value=0 ]} {Instance:consumer_name=il-ingest-parcel-events State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-parcel-events Value:0xc0ba18a280} B:{Var:B Labels:consumer_name=il-ingest-parcel-events Value:0xc0ba18a288}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434176378s EvaluationString:[ var='A' labels={consumer_name=il-ingest-parcel-events} value=0 ], [ var='B' labels={consumer_name=il-ingest-parcel-events} value=0 ]} {Instance:consumer_name=il-ingest-receipt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-receipt Value:0xc0ba18a2b8} B:{Var:B Labels:consumer_name=il-ingest-receipt Value:0xc0ba18a2e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434181276s EvaluationString:[ var='A' labels={consumer_name=il-ingest-receipt} value=0 ], [ var='B' labels={consumer_name=il-ingest-receipt} value=0 ]} {Instance:consumer_name=il-ingest-stable-collection-blocks State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-stable-collection-blocks Value:0xc0ba18a310} B:{Var:B Labels:consumer_name=il-ingest-stable-collection-blocks Value:0xc0ba18a318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434186083s EvaluationString:[ var='A' labels={consumer_name=il-ingest-stable-collection-blocks} value=0 ], [ var='B' labels={consumer_name=il-ingest-stable-collection-blocks} value=0 ]} {Instance:consumer_name=il-ingest-stable-collection-template State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=il-ingest-stable-collection-template Value:0xc0ba18a348} B:{Var:B Labels:consumer_name=il-ingest-stable-collection-template Value:0xc0ba18a370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434191924s EvaluationString:[ var='A' labels={consumer_name=il-ingest-stable-collection-template} value=0 ], [ var='B' labels={consumer_name=il-ingest-stable-collection-template} value=0 ]} {Instance:consumer_name=mp-dx-parcel-events-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=mp-dx-parcel-events-backfill Value:0xc0ba18a3a0} B:{Var:B Labels:consumer_name=mp-dx-parcel-events-backfill Value:0xc0ba18a3a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434196701s EvaluationString:[ var='A' labels={consumer_name=mp-dx-parcel-events-backfill} value=26.437158469945356 ], [ var='B' labels={consumer_name=mp-dx-parcel-events-backfill} value=0 ]} {Instance:consumer_name=mp-dx-routes-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=mp-dx-routes-backfill Value:0xc0ba18a3d8} B:{Var:B Labels:consumer_name=mp-dx-routes-backfill Value:0xc0ba18a400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434204427s EvaluationString:[ var='A' labels={consumer_name=mp-dx-routes-backfill} value=0 ], [ var='B' labels={consumer_name=mp-dx-routes-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-cache-addresses-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-cache-addresses-backfill Value:0xc0ba18a430} B:{Var:B Labels:consumer_name=pcm-dx-cache-addresses-backfill Value:0xc0ba18a438}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434210089s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-cache-addresses-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-cache-addresses-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-collection-ps-relations-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-collection-ps-relations-backfill Value:0xc0ba18a490} B:{Var:B Labels:consumer_name=pcm-dx-collection-ps-relations-backfill Value:0xc0ba18a468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434216448s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-collection-ps-relations-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-collection-ps-relations-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-customer-contact-relations-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-customer-contact-relations-backfill Value:0xc0ba18a4c0} B:{Var:B Labels:consumer_name=pcm-dx-customer-contact-relations-backfill Value:0xc0ba18a4c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434225893s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-customer-contact-relations-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-customer-contact-relations-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-customer-contacts-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-customer-contacts-backfill Value:0xc0ba18a4f8} B:{Var:B Labels:consumer_name=pcm-dx-customer-contacts-backfill Value:0xc0ba18a520}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43423164s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-customer-contacts-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-customer-contacts-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-labels-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-labels-backfill Value:0xc0ba18a558} B:{Var:B Labels:consumer_name=pcm-dx-labels-backfill Value:0xc0ba18a550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434239761s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-labels-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-labels-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-periods-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-periods-backfill Value:0xc0ba18a5b0} B:{Var:B Labels:consumer_name=pcm-dx-periods-backfill Value:0xc0ba18a588}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434245243s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-periods-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-periods-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-routes-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-routes-backfill Value:0xc0ba18a5e0} B:{Var:B Labels:consumer_name=pcm-dx-routes-backfill Value:0xc0ba18a5e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434250511s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-routes-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-routes-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-stable-collection-block-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-stable-collection-block-backfill Value:0xc0ba18a618} B:{Var:B Labels:consumer_name=pcm-dx-stable-collection-block-backfill Value:0xc0ba18a640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434256524s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-stable-collection-block-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-stable-collection-block-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-stable-collection-template-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-stable-collection-template-backfill Value:0xc0ba18a670} B:{Var:B Labels:consumer_name=pcm-dx-stable-collection-template-backfill Value:0xc0ba18a678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434264199s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-stable-collection-template-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-stable-collection-template-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-tours-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-tours-backfill Value:0xc0ba18a6a8} B:{Var:B Labels:consumer_name=pcm-dx-tours-backfill Value:0xc0ba18a6d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434270983s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-tours-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-tours-backfill} value=0 ]} {Instance:consumer_name=pcm-dx-workdays-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pcm-dx-workdays-backfill Value:0xc0ba18a700} B:{Var:B Labels:consumer_name=pcm-dx-workdays-backfill Value:0xc0ba18a708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434276729s EvaluationString:[ var='A' labels={consumer_name=pcm-dx-workdays-backfill} value=0 ], [ var='B' labels={consumer_name=pcm-dx-workdays-backfill} value=0 ]} {Instance:consumer_name=pi-dx-courier-tours-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pi-dx-courier-tours-backfill Value:0xc0ba18a738} B:{Var:B Labels:consumer_name=pi-dx-courier-tours-backfill Value:0xc0ba18a760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434283183s EvaluationString:[ var='A' labels={consumer_name=pi-dx-courier-tours-backfill} value=0 ], [ var='B' labels={consumer_name=pi-dx-courier-tours-backfill} value=0 ]} {Instance:consumer_name=pi-dx-customer-addresses-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pi-dx-customer-addresses-backfill Value:0xc0ba18a798} B:{Var:B Labels:consumer_name=pi-dx-customer-addresses-backfill Value:0xc0ba18a790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434300419s EvaluationString:[ var='A' labels={consumer_name=pi-dx-customer-addresses-backfill} value=0 ], [ var='B' labels={consumer_name=pi-dx-customer-addresses-backfill} value=0 ]} {Instance:consumer_name=pi-dx-customer-territory-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pi-dx-customer-territory-backfill Value:0xc0ba18a7c8} B:{Var:B Labels:consumer_name=pi-dx-customer-territory-backfill Value:0xc0ba18a7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.43430567s EvaluationString:[ var='A' labels={consumer_name=pi-dx-customer-territory-backfill} value=0 ], [ var='B' labels={consumer_name=pi-dx-customer-territory-backfill} value=0 ]} {Instance:consumer_name=pi-dx-customers-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pi-dx-customers-backfill Value:0xc0ba18a828} B:{Var:B Labels:consumer_name=pi-dx-customers-backfill Value:0xc0ba18a820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434310709s EvaluationString:[ var='A' labels={consumer_name=pi-dx-customers-backfill} value=0 ], [ var='B' labels={consumer_name=pi-dx-customers-backfill} value=0 ]} {Instance:consumer_name=pi-dx-parcel-ranges-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pi-dx-parcel-ranges-backfill Value:0xc0ba18a858} B:{Var:B Labels:consumer_name=pi-dx-parcel-ranges-backfill Value:0xc0ba18a880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434316476s EvaluationString:[ var='A' labels={consumer_name=pi-dx-parcel-ranges-backfill} value=0 ], [ var='B' labels={consumer_name=pi-dx-parcel-ranges-backfill} value=0 ]} {Instance:consumer_name=pi-dx-sales-contacts-consumer State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=pi-dx-sales-contacts-consumer Value:0xc0ba18a8b0} B:{Var:B Labels:consumer_name=pi-dx-sales-contacts-consumer Value:0xc0ba18a8b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434322211s EvaluationString:[ var='A' labels={consumer_name=pi-dx-sales-contacts-consumer} value=0 ], [ var='B' labels={consumer_name=pi-dx-sales-contacts-consumer} value=0 ]} {Instance:consumer_name=qe-parcel-ranges-ingest State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=qe-parcel-ranges-ingest Value:0xc0ba18a8e8} B:{Var:B Labels:consumer_name=qe-parcel-ranges-ingest Value:0xc0ba18a910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434327756s EvaluationString:[ var='A' labels={consumer_name=qe-parcel-ranges-ingest} value=0 ], [ var='B' labels={consumer_name=qe-parcel-ranges-ingest} value=0 ]} {Instance:consumer_name=qe-resending-notification-backfill State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=qe-resending-notification-backfill Value:0xc0ba18a940} B:{Var:B Labels:consumer_name=qe-resending-notification-backfill Value:0xc0ba18a948}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434333388s EvaluationString:[ var='A' labels={consumer_name=qe-resending-notification-backfill} value=0 ], [ var='B' labels={consumer_name=qe-resending-notification-backfill} value=0 ]}]" duration=39.161801ms
+logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:14.435529573Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.287114ms
+level=debug ts=2024-05-29T13:44:14.43555982Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:14.435535209Z caller=remote_alert_sender.go:94 user=109521 slug=timaparf host=timaparf-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.49.253:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=PBFh7TO7k alerts=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vu8oojh3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.435333715Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.435214296Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vu8b5d7s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.435192334Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vu8b5d7s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.435071363Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vu81p7cp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.435026732Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=633335 slug=promqlworkshop t=2024-05-29T13:44:14.434962375Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug component=discovery ts=2024-05-29T13:44:14.434898384Z caller=retry.go:58 user=480895 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=3
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vu81p7cp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434940471Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vu81p7cp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434874191Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtzlq9ep-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.43479798Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:14.434833442Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.434811184Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.scheduler user=471861 slug=planetstaging version=2 fingerprint=61694b0af84f9d29 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.434752085Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.434443054s EvaluationString:}]" duration=20.28304ms
+level=debug ts=2024-05-29T13:44:14.434716735Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.434646991Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtzlq9ep-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434611728Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=bc8efe9a03568751 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.434570418Z level=error msg="Failed to evaluate rule" error="failed to build query 'F': data source not found" duration=9.659869ms
+level=error ts=2024-05-29T13:44:14.434529032Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'F': data source not found"
+level=debug ts=2024-05-29T13:44:14.43458004Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtxkovzx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434575528Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtxkovzx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434500907Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtxkovzx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434470277Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.historian backend=loki user=174054 slug=netrading t=2024-05-29T13:44:14.434432933Z level=debug msg="Done saving alert state history batch"
+logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.434454419Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=112387 slug=lucidhq instance="Region=eu-central-1, ServiceLimit=--, ServiceName=EC2" t=2024-05-29T13:44:14.434428588Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.434387155Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtx4gwuy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434353475Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtx4gwuy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434280595Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.434263888Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=242310 slug=suzy instance= t=2024-05-29T13:44:14.434122903Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=242310 slug=suzy instance= t=2024-05-29T13:44:14.43411157Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vttnjtbj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434123253Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vttnjtbj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.434011162Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vttnjtbj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433981102Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vtrv0dy7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433907891Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.433863171Z caller=remote_instance_store.go:51 user=304032 slug=clearbanc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.433814893Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=ad270e91dc6c1d7d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.433605556Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.433254512s EvaluationString:}]" duration=20.104036ms
+level=debug ts=2024-05-29T13:44:14.433651614Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=304032 slug=clearbanc t=2024-05-29T13:44:14.433590171Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.433592585Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.433549679Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.594478ms
+logger=ngalert.state.manager.persist user=361282 slug=turing t=2024-05-29T13:44:14.433554544Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.378116ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vta94drc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433550017Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.433452526Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vta94drc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433525397Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vta94drc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433488367Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vta94drc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433474436Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vta94drc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433435846Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vta94drc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433405436Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vt7ktzk8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433342635Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vt7ktzk8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433269174Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vspxp4pu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433142623Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vspxp4pu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433115253Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vspxp4pu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.433019162Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vs9hdzps-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.432785089Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.432557219Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.432502177Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vs9ghg8j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.432499216Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vs7c4k7y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.432452416Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vs7c4k7y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.432369825Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.432287249Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vs1b1x14-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.432275814Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vs1b1x14-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.432189063Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.431926661Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrv1c3c2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.43189016Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.431937543Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrrj27fh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431821419Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.431905022Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.431912872Z caller=remote_instance_store.go:51 user=295631 slug=dapvizor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.431855712Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.431677673Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=ioFV1Jn4z, ref_id=A" t=2024-05-29T13:44:14.431825146Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:14.43179639Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=ioFV1Jn4z, ref_id=A" t=2024-05-29T13:44:14.431811403Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=295631 slug=dapvizor t=2024-05-29T13:44:14.431789791Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrrj27fh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431760379Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrrj27fh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431680198Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.431678624Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrrgrrc6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431443136Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrrgrrc6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431403835Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrqy9a7s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431333254Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrqy9a7s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431259134Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vrqy9a7s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431225193Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.431057382Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=prod-playlive-card-data-synchronise-20220707100444096800000007" t=2024-05-29T13:44:14.431103983Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vroy4x3b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431114672Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vroy4x3b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431064372Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vroy4x3b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.431035911Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.430976872Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.430847686Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=468607 slug=thetalake instance= t=2024-05-29T13:44:14.430819952Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.430698423Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=110.383418ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vr7u1lu4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.430209543Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.430149948Z caller=remote_instance_store.go:51 user=604874 slug=argamon msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.430135207Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vr7u1lu4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.430197243Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vr7u1lu4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.430096252Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vr2dnw8t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.430046111Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vr2dnw8t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429990141Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vr2dnw8t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42995201Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqwup8tk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42988833Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.429702247Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqnkp8gg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429716358Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqnkp8gg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429694578Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqnkp8gg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429626737Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqj7g74v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429598537Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqj7g74v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429575706Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqj7g74v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429510136Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=452240 slug=trulioo instance= t=2024-05-29T13:44:14.429393932Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.429312457Z caller=remote_instance_store.go:51 user=815713 slug=returnstaging msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqhx92k4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429275933Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqhx92k4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429254173Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=486972 slug=payretailers instance= t=2024-05-29T13:44:14.429254826Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=815713 slug=returnstaging t=2024-05-29T13:44:14.4292431Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=815713 slug=returnstaging instance="datasource_uid=timescale_read_only, ref_id=A" t=2024-05-29T13:44:14.429202891Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqgajp1f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.429209563Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=815713 slug=returnstaging instance="datasource_uid=timescale_read_only, ref_id=A" t=2024-05-29T13:44:14.42918286Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=815713 slug=returnstaging t=2024-05-29T13:44:14.429086772Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=info ts=2024-05-29T13:44:14.428905235Z caller=remote_alert_sender.go:94 user=922741 slug=johnnyleeothon host=johnnyleeothon-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.250.231:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=hkOycEi4k alerts=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqeikzuk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42894884Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.428824002Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqau1hr2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42891875Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.428869801Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=691855 slug=chainlake t=2024-05-29T13:44:14.428776257Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqau1hr2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428866029Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqau1hr2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428843319Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqau1hr2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428792868Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vqacgcn2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428706068Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vq7rydup-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428615017Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.428394115Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpn1fec5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428315464Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.428237795Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.428268173Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.428027282Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.428192742Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.537312ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpiv2wjo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428164002Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpiv2wjo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428141712Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.428035423Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpiv2wjo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428060511Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpiv2wjo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.428049551Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vphxy55k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427922849Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpcw42tp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427845799Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.427736426Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpcw42tp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427794878Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vpcw42tp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427770508Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vp6745ut-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427672947Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vp6745ut-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427663247Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:14.427685777Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.427544211Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=173374 slug=felmo version=12 fingerprint=2d7244ac51212700 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.427512892Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.427124454s EvaluationString:}]" duration=126.217189ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vp6745ut-termination-metadata-pv,
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427581066Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vp6745ut-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427571066Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.427508008Z caller=remote_alert_sender.go:94 user=84360 slug=sib host=sib-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.245.254:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fKfawCi4k alerts=1 + logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.42742479Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=40.768311ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-voyxfxto-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427366814Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.427321849Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-voyxfxto-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427338163Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-voyxfxto-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427276593Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-voq67x55-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427219812Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.427219334Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.426970677Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-voq67x55-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427182582Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-voq67x55-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.427092291Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.426925056Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.426802279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.426790758Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vocme5gf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426692457Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vocme5gf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426640336Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.426562199Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vobff9mn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426571636Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vobff9mn-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426521635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vobff9mn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426510785Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.426429423Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vo693hri-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426378404Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.426308393Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vo693hri-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426336943Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vo693hri-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426311613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:14.426244433Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.1153ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vnwgsrmt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426270722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vnwgsrmt-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.426155091Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=556147 slug=bettercloudholding t=2024-05-29T13:44:14.426045652Z level=debug msg="Saving alert states" count=8 max_state_save_concurrency=1 + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="device=/dev/sdb, instance=zookeeper-hold-uscen-b-c001-n002, mountpoint=/opt/disk0" t=2024-05-29T13:44:14.426031372Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.425813401Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="device=/dev/root, instance=zookeeper-hold-uscen-c-c001-n003, mountpoint=/" t=2024-05-29T13:44:14.42575765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="device=/dev/root, instance=zookeeper-hold-uscen-c-c001-n003, mountpoint=/" t=2024-05-29T13:44:14.425743625Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vnky49oo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.425732697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vnky49oo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.425717767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="device=/dev/root, instance=zookeeper-hold-uscen-b-c001-n002, mountpoint=/" t=2024-05-29T13:44:14.425587585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vnky49oo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.425623826Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.425506914Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vni84a5s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.425469854Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.425403546Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vnglity1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.425363923Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.425377274Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=542894 slug=aize instance= t=2024-05-29T13:44:14.425299815Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=542894 slug=aize t=2024-05-29T13:44:14.425241038Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=542894 slug=aize version=16 fingerprint=7d62082143d03d99 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.425150032Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc024dde7c8} C:{Var:C Labels: Value:0xc024dde7d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.424809418s EvaluationString:[ var='B' labels={} value=1 ], [ var='C' labels={} value=0 ]}]" duration=607.978984ms + level=debug ts=2024-05-29T13:44:14.425130718Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vn7vd7e3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42508605Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmy0k676-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424986869Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmy0k676-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424938009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="device=/dev/root, instance=haproxy-hold-app-b0sh, mountpoint=/" t=2024-05-29T13:44:14.424955221Z level=debug msg="Keeping state" state=Normal + 
level=debug ts=2024-05-29T13:44:14.42492057Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmy0k676-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424849538Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.424713825Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.42472451Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmwyzvva-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424746567Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=556147 slug=bettercloudholding t=2024-05-29T13:44:14.424678793Z level=debug msg="State manager processing evaluation results" resultCount=8 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmwyzvva-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424721786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmwyzvva-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424677946Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.424628483Z caller=remote_instance_store.go:51 user=497819 slug=fornybar msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.424459983Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmw1gxv1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424622985Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmw1gxv1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424530044Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmw1gxv1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424520214Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.424532257Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmvsx1wc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424491274Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.424458754Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmvsx1wc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424468254Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.424350016Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmvsx1wc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424391753Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmvjz728-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424214271Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-vmvjz728-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424186271Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.424036318Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.423997921Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmumkxw9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.424033499Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.423999388Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmumkxw9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.423963589Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.423751277Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmsxynjx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.423825607Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.423470417Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vml3rneg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.423499334Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vml3rneg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.423471813Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.423399333Z 
caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vmeh74bf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.423296972Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.423175853Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.423253012Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.42321252Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.42308863Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.423075578Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vlvqb0a1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.423075299Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.422738755Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=21.782266ms + level=debug ts=2024-05-29T13:44:14.422977789Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.422968534Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.422950226Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:14.422919893Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=206107 slug=hydrolix version=3 fingerprint=4a6ee427f90473e7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.42282097Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc027129668} C:{Var:C Labels: Value:0xc027129670}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.422509322s EvaluationString:[ var='A' labels={} value=0 ], [ var='C' labels={} value=0 ]}]" duration=53.674994ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vlrfh3nf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.422669365Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:14.422469053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vlm0j71x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.422417703Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vlm0j71x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.422375292Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.42237953Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vljezi1n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.422229081Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vljezi1n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42215877Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.422094101Z caller=remote_instance_store.go:51 user=202755 slug=iwmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=548157 slug=kushkiprod t=2024-05-29T13:44:14.422117026Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vlbe3k9f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.422107289Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=142180 slug=luxtronic t=2024-05-29T13:44:14.421974824Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=142180 slug=luxtronic version=28 
fingerprint=4740608374e1617a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.421910759Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.421628491s EvaluationString:}]" duration=150.012424ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vl6vp6e8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421827697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vl32dt4p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421682655Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vl32dt4p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421639745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=528217 slug=monitoringcenter t=2024-05-29T13:44:14.421642611Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=528217 slug=monitoringcenter instance= t=2024-05-29T13:44:14.42162557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vl32dt4p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421614724Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.421471989Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vl1n6lp4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421476633Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-vl1n6lp4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421428852Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vl1n6lp4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421416142Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkvb0aja-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.421231Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkvb0aja-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42117904Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkvb0aja-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42116613Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.421065375Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.421162602Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.421136676Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.42108214Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkub1s6n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420972218Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkub1s6n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420937957Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.42092992Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkn2ahl2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420750335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkhtn2j6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420686995Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.420634993Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=304032 slug=clearbanc t=2024-05-29T13:44:14.42057466Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=304032 slug=clearbanc instance="clearco_owner=capital-one-devs, clearco_service=queues-stripe-ecom-transaction" t=2024-05-29T13:44:14.42055982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=304032 slug=clearbanc instance="clearco_owner=capital-one-devs, clearco_service=queues-stripe-ecom-transaction" t=2024-05-29T13:44:14.420541706Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=5bfb0093cca014a6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.420569807Z level=debug msg="Alert rule evaluated" results="[{Instance:name=keepLastValue(apex.US_East.players.ps4.mh448980.serverstats) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0acae1528} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc0acae1550} Threshold:{Var:Threshold Labels: Value:0xc0acae1558} compare:{Var:compare Labels:name=keepLastValue(apex.US_East.players.ps4.mh448980.serverstats) Query Value:0xc0acae1598} sum:{Var:sum Labels:name=keepLastValue(apex.US_East.players.ps4.mh448980.serverstats) Query Value:0xc0acae15b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.42022032s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=1000 ], [ var='Threshold' labels={} value=-20 ], [ var='compare' labels={name=keepLastValue(apex.US_East.players.ps4.mh448980.serverstats) Query} value=0 ], [ var='sum' labels={name=keepLastValue(apex.US_East.players.ps4.mh448980.serverstats) Query} value=0 
]}]" duration=36.687674ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkgy74db-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420513713Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=628703 slug=bancobci instance="instance=cloudflare-exporter.cloudflare-exporter:8080, job=integrations/cloudflare, zone=bciplus.cl" t=2024-05-29T13:44:14.420463993Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=628703 slug=bancobci instance="instance=cloudflare-exporter.cloudflare-exporter:8080, job=integrations/cloudflare, zone=bciplus.cl" t=2024-05-29T13:44:14.420404292Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkgy74db-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420418432Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkgy74db-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420377952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vkgy74db-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420362921Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vk6mpucv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420286411Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.420279808Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vk6mpucv-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.42018846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vk2rdyt6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420116659Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vk2rdyt6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.420048008Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vk2rdyt6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419994528Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.419692029Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vk0fbo4v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419747425Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjytdllt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419655824Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjytdllt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419616014Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.41960298Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.718166ms + level=debug 
ts=2024-05-29T13:44:14.419577255Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=358720 slug=earnbounty t=2024-05-29T13:44:14.419549795Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.793508ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjytdllt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419535353Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjuw08bp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419406722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjuw08bp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419370291Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.419409528Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=25.494955ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjuw08bp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419297801Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjuaaz1s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41923105Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjuaaz1s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419168909Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjjqfbxy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.419086788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjigr56z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418857806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjigr56z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418637374Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjgckkly-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418577903Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjgckkly-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418542453Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.418626619Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.418562482Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=127813 slug=clearsale version=2 fingerprint=ed22465a775c8538 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.418291308Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.41785539s EvaluationString:}]" duration=202.4753ms + logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.418484387Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjfy34ts-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418334731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjfy34ts-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41824954Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.418144919Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjedn97a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418135309Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.418070953Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.917326ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjedn97a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418074838Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjdps4qm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418036078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjdps4qm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.418011437Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-vjdps4qm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.417944107Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vjdps4qm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.417878486Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.41775723Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.417659241Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.417593292Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.417642203Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.417596329Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vj97rodi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.417492122Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.417495402Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.417463834Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.41741606Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vj97rodi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.417451312Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.417402274Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vj5yfr4j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.417356541Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vj5yfr4j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41730081Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vizm5n2h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.417207549Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.417223375Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vizm5n2h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.417097318Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.417182286Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.416954507Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.416952812Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viz0o843-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416887336Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=fdb52ef10b39aa1d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.41676325Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.416487819s EvaluationString:}]" duration=357.718019ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viz0o843-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.416779335Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viye7i5y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416636573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viugtf7u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416503632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viugjo5v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41631117Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viugjo5v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.4162903Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viugjo5v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416241779Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vitdk82j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416209749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vitdk82j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.416188459Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.41619334Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vitdk82j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416148238Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vitdk82j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416115208Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.415998068Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vir7y21g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416044567Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vir7y21g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.416033147Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.415881893Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.415765179Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-viq4p6es-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415780305Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vijphy40-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415655693Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vijphy40-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415627553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vihcb0um-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415586053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vihcb0um-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415564632Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vienir01-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415426071Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.415359685Z caller=remote_instance_store.go:51 user=109521 slug=timaparf msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=109521 slug=timaparf t=2024-05-29T13:44:14.415314503Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=info ts=2024-05-29T13:44:14.415265547Z caller=remote_image_capturer.go:61 user=109521 slug=timaparf rule_org_id=1 rule_uid=PBFh7TO7k dashboard=000000012 panel=5 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vidncc99-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415225379Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=620339 slug=energostack version=1 fingerprint=6702372b34c03ff4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.415148809Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal 
Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.414882983s EvaluationString:}]" duration=32.887507ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vidncc99-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415085597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vi464eat-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.415041997Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.414942335Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.414906429Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vi464eat-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414886785Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.414845684Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vi022gdm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414709104Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.414391055Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vi022gdm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414611393Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhzc8vhg-termination-metadata-pv, 
phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414571892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhzc8vhg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414544392Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhzc8vhg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414522282Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.414400575Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhzc8vhg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414483161Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhm6es8h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414421811Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhm6es8h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41438415Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.41422744Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=109521 slug=timaparf instance= t=2024-05-29T13:44:14.414182774Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhlwpm6b-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414186808Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.414131929Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhlwpm6b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414155798Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhlwpm6b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414118257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhjb23ek-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.414020566Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhjb23ek-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413906495Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhivikf4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413839275Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vhivikf4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413650903Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vh6r4uyf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413487681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vh6r4uyf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413464621Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vh42jvzv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413323849Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vh42jvzv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413254369Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vh42jvzv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413227718Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgyc4at8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413189118Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgyc4at8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413109937Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-vgyc4at8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.413081957Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgvirg9x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412966446Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.412861115Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.412847778Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgvirg9x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412821254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgvirg9x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412806724Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgtnqk8w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412762674Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgtnqk8w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412655302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgtnqk8w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.412573232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=922741 slug=johnnyleeothon instance= t=2024-05-29T13:44:14.412461554Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgqioyt5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41238464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgqioyt5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412355709Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgphin5l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412307559Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgphin5l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412264178Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.41234932Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.412284127Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=467258 slug=neonprod t=2024-05-29T13:44:14.412311418Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=163513 slug=dialpad instance= t=2024-05-29T13:44:14.412187346Z level=debug msg="Changing state" previous_state=Pending next_state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=467258 slug=neonprod t=2024-05-29T13:44:14.412234711Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.412241666Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgphin5l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412163307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.412165226Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.412124828Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgm67wiq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412097647Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=postgres-pakistan-encounter-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.412107238Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.412041316Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=postgres-pakistan-encounter-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.412093924Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgm67wiq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.412056226Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.412065081Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:14.411998216Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.466519ms + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-users-source/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411971933Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.411942919Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgdzjcjs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.411913145Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.411844095Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-organisations-source/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.41179309Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=237629 slug=ocrolus instance= t=2024-05-29T13:44:14.411797315Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-organisations-source/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.4117843Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=237629 slug=ocrolus t=2024-05-29T13:44:14.41175055Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vgdzjcjs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.411761263Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.411686947Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-diseases-source/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411711135Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.411681358Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.411637349Z caller=remote_instance_store.go:51 user=110009 slug=dipperx msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.411633334Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-consultations-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411634348Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-consultations-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.41162834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.411612252Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=110009 slug=dipperx instance= t=2024-05-29T13:44:14.411558679Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-conditions-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411562915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-conditions-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411553412Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vg79d58c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41144306Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.411535618Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-clinics-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411486053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=mongo-pakistan-clinics-source/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411476774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.41145903Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vg79d58c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.411377079Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.411340593Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vg6x2j0c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.411218058Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vg6x2j0c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.411157057Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.411227953Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-roles-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411150714Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-roles-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.411140112Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.410999648Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.410941984Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.41086823Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.410875065Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=warn ts=2024-05-29T13:44:14.410773393Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=762244 slug=yolk + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vg3au6zi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.410725553Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.410747192Z caller=ruler.go:522 msg="tenant is owned by this instance" user=762244 slug=yolk groups=0 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-prescription-administration-schedules-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.410781096Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.410755927Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vg0h4a09-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.410659122Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.41075576Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.410561454Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-organisations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.410668131Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.410641144Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vg0h4a09-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.410574391Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.410501962Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.410420566Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.410512574Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfx7kjds-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41047229Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfx7kjds-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.41044536Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.410408569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfx7kjds-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.410380919Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.41035297Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.410210305Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.410151035Z caller=remote_instance_store.go:51 user=517596 slug=datar msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.41029073Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.410290414Z caller=remote_instance_store.go:51 user=139073 slug=cargo1 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=361282 slug=turing t=2024-05-29T13:44:14.410169918Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=361282 slug=turing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.410149533Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=139073 slug=cargo1 t=2024-05-29T13:44:14.41024723Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.410134987Z caller=ruler.go:522 msg="tenant is owned by this instance" user=843304 slug=ppcgroup groups=9 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfvqefi5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.410212887Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfvqefi5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.410185867Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=361282 slug=turing t=2024-05-29T13:44:14.41008986Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.410114578Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-snapshot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.410155488Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.410133451Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.410126764Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.410068412Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfvd2zc5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.410025885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfvd2zc5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409998765Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-addition-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.410048826Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-addition-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.410039948Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.410000397Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.409985194Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.409918839Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.40995806Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.409890111Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.40976494Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=363785 slug=moonletmonitor t=2024-05-29T13:44:14.409677035Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.703816ms + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.409690137Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.409652692Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfspyguj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409613751Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfspyguj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409600231Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.409561409Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfspyguj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409566951Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfspyguj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40950972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-vfspyguj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40948545Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.409430718Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=cdlhl6g7q3h8ga msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfqkhey0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409450539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=production-1, deployment=selenium-firefox, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=cmedia" t=2024-05-29T13:44:14.409403317Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfqkhey0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409323628Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.409328397Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=cdlhl6g7q3h8ga msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=production-1, deployment=selenium-chrome, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=cmedia" t=2024-05-29T13:44:14.409319537Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-fulfillment-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.40928305Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.409231168Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfdp3xjm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409085746Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vfdp3xjm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.409072976Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.409143174Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=cdlhl6g7q3h8ga msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.40907884Z caller=remote_instance_store.go:51 user=257565 slug=eddyson msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.40910605Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.409070666Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.409076034Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.409065764Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=cdlhl6g7q3h8ga msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=production-1, deployment=files-manager-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=cmedia" t=2024-05-29T13:44:14.409060023Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-idempotency-key-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.40902091Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-idempotency-key-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.409008155Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vf65egia-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.408750052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.409011012Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.408797752Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=production-1, deployment=campaign-api-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=cmedia" t=2024-05-29T13:44:14.4087736Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=production-1, deployment=campaign-api-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=cmedia" t=2024-05-29T13:44:14.40875509Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vf65egia-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.408687602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=production-1, deployment=bff-api-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=cmedia" t=2024-05-29T13:44:14.408579798Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.40863886Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.408464919Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.408479397Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-move-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.408399253Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.408367561Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-lot-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.408269421Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.408227615Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vexpqrt9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.408180086Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.408077831Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vexpqrt9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.408087055Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vexpqrt9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.408061185Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.40790298Z caller=remote_instance_store.go:51 user=813270 slug=adiante msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.40795538Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-diseases-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.407998096Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.407929696Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.407995101Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.407935367Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.407853543Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vexh9294-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407879793Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=ny4ap-uoms-01" t=2024-05-29T13:44:14.407931374Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.407847969Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vevkjhss-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407825633Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.407835726Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vevkjhss-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407807372Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.407763542Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.40777019Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-conditions-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.407667937Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-conditions-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.407654689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vekcawdv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.4076142Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vekcawdv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40758974Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.407576962Z caller=ruler.go:522 msg="tenant is owned by this instance" user=500041 slug=ziachfuchs groups=2 + logger=ngalert.state.manager.persist user=642786 slug=sophoscomnsg t=2024-05-29T13:44:14.407637057Z level=debug msg="Saving alert states" count=14 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vekcawdv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40754461Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=sase-common-dp" t=2024-05-29T13:44:14.407562186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh4z3xh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.407509539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=reserve-resource" t=2024-05-29T13:44:14.407509295Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.40745469Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=reserve-resource" t=2024-05-29T13:44:14.407494145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.407476012Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh4z3xh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407453249Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh4z3xh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407440789Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh4z3xh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407378238Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-anc-encounter-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.407402694Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.407379547Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh0rlp3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407345188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=prometheus" t=2024-05-29T13:44:14.407354344Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=prometheus" t=2024-05-29T13:44:14.407339283Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh0rlp3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407257427Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.407348145Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=logz-io" t=2024-05-29T13:44:14.407307023Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=logz-io" t=2024-05-29T13:44:14.407270523Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh0rlp3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407242747Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.407196435Z caller=remote_instance_store.go:51 user=313382 slug=hyai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.407269831Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veh0rlp3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407200926Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:14.40729448Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.407252584Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.scheduler user=109452 slug=deltarisk version=15 fingerprint=5deecf359acfcaa8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.40717067Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.406886243s EvaluationString:}]" duration=60.3504ms + logger=ngalert.state.manager.persist user=387869 slug=lantor t=2024-05-29T13:44:14.407230837Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veg7fanx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407102185Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veg7fanx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.407061545Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.40717379Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:14.407027824Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.407103116Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.407111423Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=add1d7dc4079c149 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.40692853Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.406593861s EvaluationString:}]" duration=24.867413ms + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.407081786Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.407034322Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.407003262Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vebqbsfk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406906603Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.406948345Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vebbyjiw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406780532Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.406864655Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.406834992Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vebbyjiw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406661201Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.406710157Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=argocd" t=2024-05-29T13:44:14.406660405Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.406621798Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="namespace=argocd" t=2024-05-29T13:44:14.406617865Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veav9o17-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40661177Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.406629284Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veav9o17-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406547749Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.406598278Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.406519377Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=642786 slug=sophoscomnsg t=2024-05-29T13:44:14.406366142Z level=debug msg="State manager processing evaluation results" resultCount=14
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.40650693Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veav9o17-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406507249Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veasmxcb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406400458Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.40631678Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-veasmxcb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406369188Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.406221279Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.406349358Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.40624932Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.406120788Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.406163927Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.40612035Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ve4mnce0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406067134Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.4060471Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ve0d4b2w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.406040324Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.406004654Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdlrsn21-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405898253Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.405911078Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.405879959Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdlrsn21-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405798682Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdlrsn21-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405788222Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdj2c04g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405738741Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.405777663Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.4056681Z caller=grafana.go:247 user=90424 slug=westerveltlumber msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=error" groups=154 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdhh3anu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40560889Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdhh3anu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40558854Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.405567442Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.405530671Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.405467105Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.405472919Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdcej3sy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405411878Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdcej3sy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405402078Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vdcej3sy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405372067Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vd80dkzl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405320897Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.405352574Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vd80dkzl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405281826Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.405269984Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.405235574Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.405250879Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.405189779Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.405185781Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.405168293Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vd6s2cid-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405163755Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vd6s2cid-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.405115265Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.40513143Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.405126419Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.404880796Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.405082861Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vd6f65e8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404888592Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vd6f65e8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404848982Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.404816644Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.404777145Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.404745578Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.40461121Z caller=remote_instance_store.go:51 user=497819 slug=fornybar msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.404624083Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.404575947Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.404500523Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.404401151Z caller=remote_instance_store.go:51 user=746350 slug=finnapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vckekt96-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404351057Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vckekt96-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404336157Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.404251904Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.404324233Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager.persist user=746350 slug=finnapp t=2024-05-29T13:44:14.404350013Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.404277131Z caller=ruler.go:522 msg="tenant is owned by this instance" user=867755 slug=xgenpreprod groups=1
+ logger=ngalert.state.manager user=746350 slug=finnapp t=2024-05-29T13:44:14.404308957Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=746350 slug=finnapp version=8 fingerprint=7b7d0546c40694ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.404248906Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.403863766s EvaluationString:}]" duration=17.538614ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vcibka2d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404210705Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.404153326Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.403977928Z caller=ruler.go:522 msg="tenant is owned by this instance" user=745048 slug=youwinngroup groups=0
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.404171404Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ level=debug ts=2024-05-29T13:44:14.403246299Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vcibka2d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404127285Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.404075916Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vcibka2d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404065524Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vcbv0mg4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.404030234Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.404039281Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ level=debug ts=2024-05-29T13:44:14.403975084Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vcbv0mg4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.403957643Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.403956458Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vcbv0mg4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.403930143Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.403924336Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.403908077Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ level=debug ts=2024-05-29T13:44:14.403849302Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.403804827Z caller=ruler.go:522 msg="tenant is owned by this instance" user=515819 slug=wwwslaists groups=1
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.403810473Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.403687919Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.403677766Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.403700767Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.403658851Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vca1lbl1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.403614269Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.403549734Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.403455075Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.334815ms
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.40347663Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.403469614Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vc8p36le-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.403438698Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.403446966Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimummobile, exported_endpoint=PublishIntent, exported_service=Conversation" t=2024-05-29T13:44:14.403410283Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.403361571Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.403349529Z caller=remote_instance_store.go:51 user=109928 slug=deadhappy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-payment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.403347855Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=109928 slug=deadhappy instance= t=2024-05-29T13:44:14.403263929Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vc8p36le-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.403308006Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=109928 slug=deadhappy t=2024-05-29T13:44:14.403209415Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.403231318Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimummobile, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.40324611Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaysubmission-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.403229836Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vc3sg2ue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.403195345Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.403200379Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.403118975Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimummobile, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" t=2024-05-29T13:44:14.403084811Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.402947319Z caller=ruler.go:522 msg="tenant is owned by this instance" user=500919 slug=xatka groups=0
+ level=debug ts=2024-05-29T13:44:14.402975835Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402959964Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager.persist user=260796 slug=expressvpn t=2024-05-29T13:44:14.402884893Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=249.224081ms
+ level=debug ts=2024-05-29T13:44:14.402960793Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbvfn63i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402931182Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.402906659Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.402900021Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402880184Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402800987Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed, exported_endpoint=PublishMessage, exported_service=Conversation" t=2024-05-29T13:44:14.402758059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.402739135Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402715837Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager.persist user=214309 slug=spenmo t=2024-05-29T13:44:14.402670303Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.461693ms
+ level=debug ts=2024-05-29T13:44:14.402595075Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.402518436Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402629682Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ level=debug ts=2024-05-29T13:44:14.402486631Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbus2f1s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402513058Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.40254628Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402464763Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:14.402495016Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:14.402487776Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.402400444Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=856040 slug=kuady t=2024-05-29T13:44:14.402428314Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbus2f1s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402439027Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.402436241Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.402415472Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.402319173Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.402276237Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbr07pdy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402290176Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbr07pdy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402260025Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbr07pdy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402250035Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.402223586Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.402213547Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402191973Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbq5otvv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402156184Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbmcx98r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.402031443Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.402035961Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vbmcx98r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401981283Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.401953593Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vb8perrx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401929902Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vb8perrx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401919492Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=mtpanacea1, exported_endpoint=PublishNewRepEvent, exported_service=Conversation" t=2024-05-29T13:44:14.401859802Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vb8perrx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401806891Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.401831699Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vaz7qu7c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40176573Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.401718905Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, exported_endpoint=SendToAgent, exported_service=Conversation" t=2024-05-29T13:44:14.401726617Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vaz7qu7c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.40170484Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.401600622Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vathp5tw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401582558Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vathp5tw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401570438Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.400918279Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=prd-west2"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.401478339Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="env=prd-east" t=2024-05-29T13:44:14.400900909Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.401448427Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, exported_endpoint=PublishMessage, exported_service=Conversation" t=2024-05-29T13:44:14.401439841Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vathp5tw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401432687Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vao70hxc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401397817Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vao70hxc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401376096Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.401007059Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.401368556Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.401279831Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, exported_endpoint=PublishIntent, exported_service=Conversation" t=2024-05-29T13:44:14.401269361Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=119385 slug=elastio t=2024-05-29T13:44:14.401290072Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=119385 slug=elastio instance="DBClusterIdentifier=develop-blue-stack-postgresqlv2, Role=WRITER" t=2024-05-29T13:44:14.401277499Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=119385 slug=elastio instance="DBClusterIdentifier=develop-blue-stack-postgresqlv2, Role=READER" t=2024-05-29T13:44:14.401234799Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=119385 slug=elastio instance="DBClusterIdentifier=develop-blue-stack-postgresqlv2, Role=READER" t=2024-05-29T13:44:14.40122203Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="VpnId=vpn-028038af91f4d9750" t=2024-05-29T13:44:14.401221188Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.401203246Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.401180976Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vamt7keg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401189084Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-vamt7keg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401177094Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.401017409Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.401101728Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.401090706Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-valytwz0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.401086343Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.401069637Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.401039353Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.400940108Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.400904341Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ level=warn ts=2024-05-29T13:44:14.400813298Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=660593 slug=yotamnimble
+ level=warn ts=2024-05-29T13:44:14.400772298Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=531355 slug=zippsafe
+ level=debug ts=2024-05-29T13:44:14.400803345Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-va7znzgr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.400692059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.400793227Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors"
+ level=debug ts=2024-05-29T13:44:14.400670625Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.400645419Z level=warn msg="Rule declares one or many reserved labels.
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.40056983Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance="datasource_uid=grafanacloud-logs, ref_id=Query" t=2024-05-29T13:44:14.400549677Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-va4v73d3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.400457017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=245291 slug=pismo version=60 fingerprint=6bd19448d28c74f2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.400362562Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=Query State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.400043643s EvaluationString:}]" duration=71.828865ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-va4v73d3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.400380116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless, exported_endpoint=PublishForm, exported_service=Conversation" t=2024-05-29T13:44:14.400381273Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless, exported_endpoint=PublishForm, exported_service=Conversation" t=2024-05-29T13:44:14.400370416Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.400281193Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:14.400306394Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=723813 slug=yuliasha + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-audit-event-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.400279711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=212546 slug=modica t=2024-05-29T13:44:14.400284399Z level=debug msg="Saving alert states" count=1 
max_state_save_concurrency=1 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.400235603Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.400192893Z caller=ruler.go:522 msg="tenant is owned by this instance" user=540824 slug=yyis groups=0 + logger=ngalert.state.manager user=212546 slug=modica t=2024-05-29T13:44:14.40022695Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=212546 slug=modica version=1 fingerprint=0fc1153ef7f52282 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.400135959Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.399845953s EvaluationString:}]" duration=43.860623ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-va355fae-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.400115973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-va355fae-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.400045603Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.400020756Z caller=remote_instance_store.go:51 user=363785 slug=moonletmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid, exported_endpoint=PublishMessage, exported_service=Conversation" t=2024-05-29T13:44:14.399970431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=363785 slug=moonletmonitor t=2024-05-29T13:44:14.399867208Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399857824Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.scheduler user=363785 slug=moonletmonitor version=4 fingerprint=680318ec45327a19 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.399779341Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.399452119s EvaluationString:}]" duration=33.736696ms + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399793535Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.399703945Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9h4bhkc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399713569Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399721793Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399645476Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.399556358Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399567415Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.399510877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=432323 slug=lithic version=21 fingerprint=94a0c9d3cfa4b9af attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.399488146Z level=debug msg="Alert rule evaluated" results="[{Instance:LoadBalancer=app/authorizer-live-lb/c3a147816d0d11e6 State:Normal Error: Results:map[] Values:map[D:{Var:D Labels:LoadBalancer=app/authorizer-live-lb/c3a147816d0d11e6 Value:0xc036cd4b58} E:{Var:E Labels:LoadBalancer=app/authorizer-live-lb/c3a147816d0d11e6 Value:0xc036cd4b80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.399065539s EvaluationString:[ var='D' labels={LoadBalancer=app/authorizer-live-lb/c3a147816d0d11e6} value=NaN ], [ var='E' labels={LoadBalancer=app/authorizer-live-lb/c3a147816d0d11e6} value=0 ]}]" duration=107.104032ms + level=debug ts=2024-05-29T13:44:14.399541238Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.39954557Z caller=grafana.go:247 user=90424 slug=westerveltlumber msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=15&state=firing&state=error" groups=142 alerts=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9h4bhkc-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399575538Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9g56u6q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399502367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9g56u6q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399472437Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9g56u6q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399424396Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.39935527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9g56u6q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399335745Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399390198Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=654951 slug=apcontrol instance="__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter" t=2024-05-29T13:44:14.39932524Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=654951 slug=apcontrol t=2024-05-29T13:44:14.399263847Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=654951 slug=apcontrol version=21 fingerprint=144a1e65260a073b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.399128617Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter Value:0xc051b34470} B:{Var:B Labels:__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter Value:0xc051b344c8} C:{Var:C Labels:__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter Value:0xc051b34528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.398597694s EvaluationString:[ var='A' labels={__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter} value=0 ], [ var='B' labels={__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter} value=0 ], [ var='C' labels={__name__=mc_mempool_size, host_name=MyLedger web server, host_role=web server, instance=localhost:9100, job=node_exporter} value=0 ]}]" duration=10.081489ms + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399252909Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9atq8hw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399217224Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" t=2024-05-29T13:44:14.399216825Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" t=2024-05-29T13:44:14.399201861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v9atq8hw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399164774Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.399037782Z caller=ruler.go:522 msg="tenant is owned by this instance" user=642049 slug=ziron groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v93nw9n2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399061333Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.399024582Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=675415 slug=x7hx + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v93nw9n2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.399051013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.399092328Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.399031359Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishget, exported_endpoint=PublishMessage, exported_service=Conversation" t=2024-05-29T13:44:14.399021245Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.39899184Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v92n157v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.398887571Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v92n157v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39885222Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.398882079Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.398861784Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398895577Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398800618Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v8w2qhmm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.398675309Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.39870974Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v8r9tqik-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.398371496Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v8r9tqik-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.398297565Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398569134Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v8plfhqw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.398144263Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dish, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.39849381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.398492825Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.398481973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398455245Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, exported_endpoint=RequestTransfer, exported_service=Conversation" t=2024-05-29T13:44:14.398358546Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.398378328Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398342942Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.39825466Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398221509Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.398133522Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.398168315Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398140669Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, exported_endpoint=PublishTextMessage, exported_service=Conversation" t=2024-05-29T13:44:14.398118319Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.398065869Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.397960965Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.39796903Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.397940754Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.397951113Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v8e910et-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397939961Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.3978477Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.397862843Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.397785421Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.397739958Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472647 slug=planet instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.397850467Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.397877618Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.397817768Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472647 slug=planet instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.397838482Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v85hl38k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397727259Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:14.39782217Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v85hl38k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397688009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v85hl38k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397665268Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.397737521Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-supplier-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.397814569Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-supplier-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.39780644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.397788814Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.397708246Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=warn ts=2024-05-29T13:44:14.397706969Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=514312 slug=zephyrde + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7tz0ah2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397629818Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7tz0ah2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397602508Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7tz0ah2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397556137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-v7tcpda2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397455836Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" t=2024-05-29T13:44:14.39747791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" t=2024-05-29T13:44:14.397464288Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.397304118Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.397420043Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.397384459Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=162543 slug=rapharacing t=2024-05-29T13:44:14.397320281Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.264083ms + logger=ngalert.state.manager user=84360 slug=sib instance= t=2024-05-29T13:44:14.397397328Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7tcpda2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397323915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.397355464Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.397323977Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7s80ifl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397239324Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=84360 slug=sib t=2024-05-29T13:44:14.397328786Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Latency Alert : POST /camp/save-campaign" + level=debug ts=2024-05-29T13:44:14.397199498Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=SendToAgent, exported_service=Conversation" t=2024-05-29T13:44:14.397305927Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.397265407Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=1x3mYGa7z, ref_id=A" t=2024-05-29T13:44:14.397239319Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.397247539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7s80ifl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.397191323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=84360 slug=sib version=2 fingerprint=de446a37f14524e8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.397059174Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.396646352s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=232.859243ms + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.397208121Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.397155297Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.397058854Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=PublishTextPreview, exported_service=Conversation" t=2024-05-29T13:44:14.397128598Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.397089753Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=PublishTextMessage, exported_service=Conversation" t=2024-05-29T13:44:14.39698689Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7r4tq4i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39689777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.396970958Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7r4tq4i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39687144Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.396902233Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7q7qtzc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39681221Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, 
connector=gcs-kenya-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.396777462Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.396765137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.396735014Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.396646519Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7q6psp2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.396640538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.396661031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.396619101Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7q6psp2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.396545987Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=PublishEvent, exported_service=Conversation" t=2024-05-29T13:44:14.396529134Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7p44u0h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.396396895Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.396436786Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.39637574Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.396408555Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7nm1p7o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.396089382Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7nm1p7o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.396047972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7nm1p7o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.396031492Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7nm1p7o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395999121Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7n0riz1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395931311Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" t=2024-05-29T13:44:14.396295589Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7n0riz1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39584928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" 
t=2024-05-29T13:44:14.396263946Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.396265878Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7jvkg3v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395685538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7jvkg3v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395575177Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.396171673Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7h0q7qi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395486006Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, exported_endpoint=PublishMessage, exported_service=Conversation" t=2024-05-29T13:44:14.396131374Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.39598777Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, exported_endpoint=PublishIntent, exported_service=Conversation" t=2024-05-29T13:44:14.39596279Z level=debug msg="Keeping state" state=Normal + level=debug component=discovery ts=2024-05-29T13:44:14.395880952Z caller=retry.go:58 user=328875 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=1 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395963735Z level=warn msg="Rule 
declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, exported_endpoint=PublishIntent, exported_service=Conversation" t=2024-05-29T13:44:14.395952032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395886806Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395873192Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.395809551Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=601459 slug=ws12 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395847305Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation" t=2024-05-29T13:44:14.395709161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:14.395560815Z level=debug msg="State manager processing evaluation results" resultCount=52 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395685032Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395660938Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395608773Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=371756 slug=asapp version=159 fingerprint=5b09cee0b257ad6f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.394957007Z level=debug msg="Alert rule evaluated" results="[{Instance:company_marker=aizhomesol, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=aizhomesol, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0272baee0} C:{Var:C Labels:company_marker=aizhomesol, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0272baee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392844938s EvaluationString:[ var='B' labels={company_marker=aizhomesol, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0.22349006298458868 ], [ var='C' labels={company_marker=aizhomesol, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=aizhomesol, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=aizhomesol, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0272bb010} C:{Var:C Labels:company_marker=aizhomesol, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0272bb018}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392872297s EvaluationString:[ var='B' labels={company_marker=aizhomesol, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=aizhomesol, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=aizhomesol, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=aizhomesol, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0272bb148} C:{Var:C Labels:company_marker=aizhomesol, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0272bb140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392883332s EvaluationString:[ var='B' labels={company_marker=aizhomesol, exported_endpoint=PublishIntent, exported_service=Conversation} value=0.9289747556895561 ], [ var='C' labels={company_marker=aizhomesol, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=aizhomesol, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=aizhomesol, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0272bb260} C:{Var:C Labels:company_marker=aizhomesol, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0272bb268}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.39289237s EvaluationString:[ var='B' labels={company_marker=aizhomesol, 
exported_endpoint=PublishMessage, exported_service=Conversation} value=0.8099601962417833 ], [ var='C' labels={company_marker=aizhomesol, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=american-airlines, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0272bb318} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0272bb380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392900305s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0.018748128544925753 ], [ var='C' labels={company_marker=american-airlines, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=american-airlines, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0272bb438} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0272bb430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392907925s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=PublishEvent, exported_service=Conversation} value=0.008297171954276554 ], [ var='C' labels={company_marker=american-airlines, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=american-airlines, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0272bb4e8} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0272bb590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392914204s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=PublishIntent, exported_service=Conversation} value=0.017539419774685164 ], [ var='C' labels={company_marker=american-airlines, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=american-airlines, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0272bb620} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0272bb628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392920622s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=PublishMessage, exported_service=Conversation} value=0.01646487653712255 ], [ var='C' labels={company_marker=american-airlines, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=american-airlines, exported_endpoint=PublishTextMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:company_marker=american-airlines, exported_endpoint=PublishTextMessage, exported_service=Conversation Value:0xc0272bb6c8} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=PublishTextMessage, exported_service=Conversation Value:0xc0272bb750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392927712s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=PublishTextMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=american-airlines, exported_endpoint=PublishTextMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=american-airlines, exported_endpoint=PublishTextPreview, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, exported_endpoint=PublishTextPreview, exported_service=Conversation Value:0xc0272bb7c0} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=PublishTextPreview, exported_service=Conversation Value:0xc0272bb7c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392933778s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=PublishTextPreview, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=american-airlines, exported_endpoint=PublishTextPreview, exported_service=Conversation} value=0 ]} {Instance:company_marker=american-airlines, exported_endpoint=SendToAgent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc0272bb878} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc0272bb970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392940905s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=american-airlines, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ]} {Instance:company_marker=assurantlifestyle, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assurantlifestyle, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0272bba00} C:{Var:C Labels:company_marker=assurantlifestyle, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0272bba08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392947617s EvaluationString:[ var='B' labels={company_marker=assurantlifestyle, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0.0936249511573181 ], [ var='C' labels={company_marker=assurantlifestyle, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=assurantlifestyle, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assurantlifestyle, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0272bbb40} C:{Var:C Labels:company_marker=assurantlifestyle, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0272bbaa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392954841s EvaluationString:[ var='B' labels={company_marker=assurantlifestyle, 
exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=assurantlifestyle, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=assurantlifestyle, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assurantlifestyle, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0272bbbe0} C:{Var:C Labels:company_marker=assurantlifestyle, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0272bbbe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392960613s EvaluationString:[ var='B' labels={company_marker=assurantlifestyle, exported_endpoint=PublishIntent, exported_service=Conversation} value=0.14795988403152877 ], [ var='C' labels={company_marker=assurantlifestyle, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=assurantlifestyle, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assurantlifestyle, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0272bbcc8} C:{Var:C Labels:company_marker=assurantlifestyle, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0272bbd90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392966895s EvaluationString:[ var='B' labels={company_marker=assurantlifestyle, exported_endpoint=PublishMessage, exported_service=Conversation} value=0.13524533851489795 ], [ var='C' labels={company_marker=assurantlifestyle, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=assurantlifestyle, exported_endpoint=PublishTextMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assurantlifestyle, exported_endpoint=PublishTextMessage, exported_service=Conversation Value:0xc0272bbe10} C:{Var:C Labels:company_marker=assurantlifestyle, exported_endpoint=PublishTextMessage, exported_service=Conversation Value:0xc0272bbe18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392974697s EvaluationString:[ var='B' labels={company_marker=assurantlifestyle, exported_endpoint=PublishTextMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=assurantlifestyle, exported_endpoint=PublishTextMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=assurantlifestyle, exported_endpoint=RequestTransfer, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assurantlifestyle, exported_endpoint=RequestTransfer, exported_service=Conversation Value:0xc0272bbeb8} C:{Var:C Labels:company_marker=assurantlifestyle, exported_endpoint=RequestTransfer, exported_service=Conversation Value:0xc0272bbf80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392981465s EvaluationString:[ var='B' labels={company_marker=assurantlifestyle, exported_endpoint=RequestTransfer, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=assurantlifestyle, exported_endpoint=RequestTransfer, exported_service=Conversation} value=0 ]} {Instance:company_marker=dish, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dish, exported_endpoint=PublishEvent, 
exported_service=Conversation Value:0xc0192b8050} C:{Var:C Labels:company_marker=dish, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0192b80c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392987433s EvaluationString:[ var='B' labels={company_marker=dish, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dish, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dish, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dish, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0192b82d0} C:{Var:C Labels:company_marker=dish, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0192b8260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.392994855s EvaluationString:[ var='B' labels={company_marker=dish, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dish, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishget, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishget, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0192b8370} C:{Var:C Labels:company_marker=dishget, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0192b83e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393004512s EvaluationString:[ var='B' labels={company_marker=dishget, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishget, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishget, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishget, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0192b8490} C:{Var:C Labels:company_marker=dishget, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0192b8500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393011048s EvaluationString:[ var='B' labels={company_marker=dishget, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishget, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishget, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishget, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0192b8830} C:{Var:C Labels:company_marker=dishget, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0192b88b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393018905s EvaluationString:[ var='B' labels={company_marker=dishget, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishget, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishpostpaid, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:company_marker=dishpostpaid, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0192b8aa0} C:{Var:C Labels:company_marker=dishpostpaid, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0192b8aa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393025238s EvaluationString:[ var='B' labels={company_marker=dishpostpaid, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0.18843971912563978 ], [ var='C' labels={company_marker=dishpostpaid, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishpostpaid, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishpostpaid, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0192b9198} C:{Var:C Labels:company_marker=dishpostpaid, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0192b9760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393064085s EvaluationString:[ var='B' labels={company_marker=dishpostpaid, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishpostpaid, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishpostpaid, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishpostpaid, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0192b9a50} C:{Var:C Labels:company_marker=dishpostpaid, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc0192b9a58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393071223s EvaluationString:[ var='B' labels={company_marker=dishpostpaid, exported_endpoint=PublishIntent, exported_service=Conversation} value=0.21868925298528186 ], [ var='C' labels={company_marker=dishpostpaid, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishpostpaid, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishpostpaid, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0192b9cc0} C:{Var:C Labels:company_marker=dishpostpaid, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc0192b9b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393077385s EvaluationString:[ var='B' labels={company_marker=dishpostpaid, exported_endpoint=PublishMessage, exported_service=Conversation} value=0.22612766295076764 ], [ var='C' labels={company_marker=dishpostpaid, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishwireless, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishwireless, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0192b9de0} C:{Var:C Labels:company_marker=dishwireless, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc0192b9de8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393088193s EvaluationString:[ var='B' labels={company_marker=dishwireless, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ], [ var='C' 
labels={company_marker=dishwireless, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishwireless, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishwireless, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc0192b9ea8} C:{Var:C Labels:company_marker=dishwireless, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc04b8da0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393094878s EvaluationString:[ var='B' labels={company_marker=dishwireless, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishwireless, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishwireless, exported_endpoint=PublishForm, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishwireless, exported_endpoint=PublishForm, exported_service=Conversation Value:0xc04b8da2a8} C:{Var:C Labels:company_marker=dishwireless, exported_endpoint=PublishForm, exported_service=Conversation Value:0xc04b8da2a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.39310087s EvaluationString:[ var='B' labels={company_marker=dishwireless, exported_endpoint=PublishForm, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishwireless, exported_endpoint=PublishForm, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishwireless, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishwireless, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04b8da358} C:{Var:C Labels:company_marker=dishwireless, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04b8da400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393107063s EvaluationString:[ var='B' labels={company_marker=dishwireless, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishwireless, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=dishwireless, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishwireless, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04b8da660} C:{Var:C Labels:company_marker=dishwireless, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04b8da668}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393113815s EvaluationString:[ var='B' labels={company_marker=dishwireless, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=dishwireless, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=jetblue, exported_endpoint=AssignToAgent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue, exported_endpoint=AssignToAgent, exported_service=Conversation Value:0xc04b8dab78} C:{Var:C Labels:company_marker=jetblue, exported_endpoint=AssignToAgent, exported_service=Conversation Value:0xc04b8daad8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393121058s EvaluationString:[ var='B' 
labels={company_marker=jetblue, exported_endpoint=AssignToAgent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=jetblue, exported_endpoint=AssignToAgent, exported_service=Conversation} value=0 ]} {Instance:company_marker=jetblue, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc04b8dad78} C:{Var:C Labels:company_marker=jetblue, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc04b8dae18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393130087s EvaluationString:[ var='B' labels={company_marker=jetblue, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=jetblue, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=jetblue, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc04b8daf48} C:{Var:C Labels:company_marker=jetblue, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc04b8daf68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393136248s EvaluationString:[ var='B' labels={company_marker=jetblue, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=jetblue, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=jetblue, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04b8db028} C:{Var:C Labels:company_marker=jetblue, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04b8db078}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.3931425s EvaluationString:[ var='B' labels={company_marker=jetblue, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=jetblue, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=jetblue, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04b8db0f8} C:{Var:C Labels:company_marker=jetblue, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04b8db178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393148968s EvaluationString:[ var='B' labels={company_marker=jetblue, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=jetblue, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=jetblue, exported_endpoint=PublishNewRepEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue, exported_endpoint=PublishNewRepEvent, exported_service=Conversation Value:0xc04b8db2c8} C:{Var:C Labels:company_marker=jetblue, exported_endpoint=PublishNewRepEvent, exported_service=Conversation Value:0xc04b8db1d8}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:4.393155758s EvaluationString:[ var='B' labels={company_marker=jetblue, exported_endpoint=PublishNewRepEvent, exported_service=Conversation} value=0.3572240034333896 ], [ var='C' labels={company_marker=jetblue, exported_endpoint=PublishNewRepEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=jetblue, exported_endpoint=SendToAgent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc04b8db3b8} C:{Var:C Labels:company_marker=jetblue, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc04b8db438}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393177683s EvaluationString:[ var='B' labels={company_marker=jetblue, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=jetblue, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ]} {Instance:company_marker=mtpanacea1, exported_endpoint=PublishNewRepEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=mtpanacea1, exported_endpoint=PublishNewRepEvent, exported_service=Conversation Value:0xc04b8db520} C:{Var:C Labels:company_marker=mtpanacea1, exported_endpoint=PublishNewRepEvent, exported_service=Conversation Value:0xc04b8db528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393184242s EvaluationString:[ var='B' labels={company_marker=mtpanacea1, exported_endpoint=PublishNewRepEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=mtpanacea1, exported_endpoint=PublishNewRepEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=mtphoenix1, exported_endpoint=SendToAgent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=mtphoenix1, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc04b8db6c0} C:{Var:C Labels:company_marker=mtphoenix1, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc04b8db6c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393192356s EvaluationString:[ var='B' labels={company_marker=mtphoenix1, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=mtphoenix1, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimumfixed, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimumfixed, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc04b8db7e8} C:{Var:C Labels:company_marker=optimumfixed, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc04b8db870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393200245s EvaluationString:[ var='B' labels={company_marker=optimumfixed, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0.17190410079240415 ], [ var='C' labels={company_marker=optimumfixed, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimumfixed, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimumfixed, exported_endpoint=PublishEvent, exported_service=Conversation 
Value:0xc04b8db930} C:{Var:C Labels:company_marker=optimumfixed, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc04b8db938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393207457s EvaluationString:[ var='B' labels={company_marker=optimumfixed, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=optimumfixed, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimumfixed, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimumfixed, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04b8db9e8} C:{Var:C Labels:company_marker=optimumfixed, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04b8dba90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393214014s EvaluationString:[ var='B' labels={company_marker=optimumfixed, exported_endpoint=PublishIntent, exported_service=Conversation} value=0.16663173902588435 ], [ var='C' labels={company_marker=optimumfixed, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimumfixed, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimumfixed, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04b8dbbe0} C:{Var:C Labels:company_marker=optimumfixed, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04b8dbbe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.39322595s EvaluationString:[ var='B' labels={company_marker=optimumfixed, exported_endpoint=PublishMessage, exported_service=Conversation} value=0.10886844757088965 ], [ var='C' labels={company_marker=optimumfixed, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimumfixed, exported_endpoint=SendToAgent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimumfixed, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc04b8dbd08} C:{Var:C Labels:company_marker=optimumfixed, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc04b8dbdb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393232638s EvaluationString:[ var='B' labels={company_marker=optimumfixed, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=optimumfixed, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimummobile, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimummobile, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc04b8dbf48} C:{Var:C Labels:company_marker=optimummobile, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation Value:0xc04b8dbf40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393238904s EvaluationString:[ var='B' labels={company_marker=optimummobile, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=optimummobile, exported_endpoint=PublishEphemeralEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimummobile, 
exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimummobile, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc00bdf4a28} C:{Var:C Labels:company_marker=optimummobile, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc00bdf5570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393245346s EvaluationString:[ var='B' labels={company_marker=optimummobile, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=optimummobile, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimummobile, exported_endpoint=PublishIntent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimummobile, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04d87a450} C:{Var:C Labels:company_marker=optimummobile, exported_endpoint=PublishIntent, exported_service=Conversation Value:0xc04d87a458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393251468s EvaluationString:[ var='B' labels={company_marker=optimummobile, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=optimummobile, exported_endpoint=PublishIntent, exported_service=Conversation} value=0 ]} {Instance:company_marker=optimummobile, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimummobile, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04d87b5f8} C:{Var:C Labels:company_marker=optimummobile, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc04d87bf00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.39325856s EvaluationString:[ var='B' labels={company_marker=optimummobile, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=optimummobile, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ]} {Instance:company_marker=rcn, exported_endpoint=PublishEvent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=rcn, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc005b7c0f0} C:{Var:C Labels:company_marker=rcn, exported_endpoint=PublishEvent, exported_service=Conversation Value:0xc005b7c140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393264603s EvaluationString:[ var='B' labels={company_marker=rcn, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=rcn, exported_endpoint=PublishEvent, exported_service=Conversation} value=0 ]} {Instance:company_marker=rcn, exported_endpoint=PublishMessage, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=rcn, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc005b7c860} C:{Var:C Labels:company_marker=rcn, exported_endpoint=PublishMessage, exported_service=Conversation Value:0xc005b7c8b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393272651s EvaluationString:[ var='B' labels={company_marker=rcn, exported_endpoint=PublishMessage, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=rcn, exported_endpoint=PublishMessage, 
exported_service=Conversation} value=0 ]} {Instance:company_marker=spectrum-cable, exported_endpoint=SendToAgent, exported_service=Conversation State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=spectrum-cable, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc005b7ca68} C:{Var:C Labels:company_marker=spectrum-cable, exported_endpoint=SendToAgent, exported_service=Conversation Value:0xc005b7ca60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393279166s EvaluationString:[ var='B' labels={company_marker=spectrum-cable, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ], [ var='C' labels={company_marker=spectrum-cable, exported_endpoint=SendToAgent, exported_service=Conversation} value=0 ]}]" duration=413.23565ms + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395581802Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-receive-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395530199Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.395466248Z caller=ruler.go:522 msg="tenant is owned by this instance" user=696656 slug=wshd groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7h0q7qi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395420375Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395432846Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395376392Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.395314946Z caller=ruler.go:522 msg="tenant is owned by this instance" user=767536 slug=woutvandenberg groups=0 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395369613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395349613Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395266991Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395227547Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7en0twn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395162443Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.395122351Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395072974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.395066306Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7en0twn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395108702Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v7en0twn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.395084502Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.394967343Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=683411 slug=wsensing + level=debug ts=2024-05-29T13:44:14.394927643Z caller=ruler.go:522 msg="tenant is owned by this instance" user=534414 slug=warlog groups=0 + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.394978096Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v76jyaye-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.394983301Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v76jyaye-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39493961Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.394900109Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.394839511Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.394819452Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + level=debug ts=2024-05-29T13:44:14.394775115Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.394725094Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.394748112Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.394656188Z caller=remote_instance_store.go:51 user=313382 slug=hyai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v75pqcal-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.394662798Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.394644947Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.394656527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.394615679Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.256832ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v73plxn6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.394561237Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.394645462Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.394627134Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.39457651Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.394525539Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.394553584Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=life-pubsub-worker-worker, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=life-pubsub-worker-worker, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox" t=2024-05-29T13:44:14.39453071Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.394474152Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Failing Kafka Connectors" + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.394475053Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="region=us-east-1, service=kube-state-metrics, stage=sandbox" + Error parsing panelUID for alert annotationruleID2766dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=698963 slug=lemonade version=2 fingerprint=489847df6bc9aa5b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.394340715Z level=debug msg="Alert rule evaluated" results="[{Instance:app=life-pubsub-worker-worker, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=life-pubsub-worker-worker, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=life-pubsub-worker-worker, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=life-pubsub-worker-worker, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox Value:0xc00a7ee760} THRESHOLD:{Var:THRESHOLD Labels:app=life-pubsub-worker-worker, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=life-pubsub-worker-worker, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox Value:0xc00a7ee640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393970642s EvaluationString:[ var='QUERY' labels={app=life-pubsub-worker-worker, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=life-pubsub-worker-worker, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox} value=0 ], [ var='THRESHOLD' labels={app=life-pubsub-worker-worker, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=life-pubsub-worker-worker, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox} value=0 ]}]" duration=63.525003ms + level=debug ts=2024-05-29T13:44:14.39438178Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.394195177Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare instance="__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect" t=2024-05-29T13:44:14.394099188Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349736 slug=elephanthealthcare t=2024-05-29T13:44:14.393938735Z level=debug msg="State manager processing evaluation results" 
resultCount=169 + logger=ngalert.scheduler user=349736 slug=elephanthealthcare version=23 fingerprint=7594331474e96e1d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.390232252Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2a858} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2a888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382664713s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2a900} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2a938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38267815s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-anc-encounter-sink/production, 
instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2a9d8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2a9a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382685331s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2aa40} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2aa78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382691128s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-clinics-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2aae0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2ab18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382695174s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2ab88} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2abc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382699312s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-conditions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2ac28} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2ac58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382706672s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2acc8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2acf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382711614s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-consultations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2ad58} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2ad90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382717457s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2adf0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2ae28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38272809s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-diseases-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, 
job=integrations/kafka-connect Value:0xc046e2ae98} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2aed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382734422s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2af30} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2af68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382739305s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-dispense-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, 
instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2afd0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382744922s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b070} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b0a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382751046s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-lot-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, 
connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b140} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382755961s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b1a0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b1d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38276051s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-move-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-receive-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, 
connector=gcs-kenya-inventory-audit-event-receive-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b240} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-receive-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b288}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382765145s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-receive-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-receive-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b2f8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382768953s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, 
connector=gcs-kenya-inventory-audit-event-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b390} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b3c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382773816s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b470} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b438}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382779232s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b500} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382782949s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-audit-event-sku-modification-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b570} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b5a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382787252s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b608} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382790404s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-idempotency-key-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b6b0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b6e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382794128s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b750} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc046e2b780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382798625s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-lot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b7e8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc046e2b820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.382820325s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-kenya-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
[Grafana alert evaluation log, condensed: the span repeats one entry per Kafka Connect connector for the metric `kafka_connect_connect_worker_metrics_connector_failed_task_count` on cluster `amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production` (job `integrations/kafka-connect`, worker instances `amazon-eu-west-1-rah3ee/10.10.137.146:9404` and `amazon-eu-west-1-rah3ee/10.10.159.147:9404`). The entries cover the `gcs-kenya-*` and `gcs-nigeria-*` sink connectors (inventory, payment, prescription, organisations, roles, ui-defaults, users, anc-encounter, clinics, conditions, consultations, diseases, and related topics). Every entry reports State:Normal with query variables A=0 and B=0, EvaluatedAt 2024-05-29 13:44:10 +0000 UTC, and EvaluationDuration of roughly 4.38s. The dump is truncated mid-entry at both ends.]
instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033436cd8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033436d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383026324s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-fulfillment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033436df0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033436e28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383030545s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sink/production, 
instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033436f28} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033436ec0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383035722s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-move-request-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033436f88} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033436fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383039115s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-external-reference-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-external-reference-sink/production, 
instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437028} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38304365s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-translation-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-translation-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437130} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-translation-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0334370f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383046885s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-translation-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-sku-translation-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} 
{Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0334371d0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38305328s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437410} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0334374a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383060235s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-addition-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, 
job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0334376f0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383067053s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437780} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0334377b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383075649s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-snapshot-sink/production, 
instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437828} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383082912s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0334378c8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383088802s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, 
connector=gcs-nigeria-inventory-stock-take-lot-update-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0334379a8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383108078s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437a20} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437a60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383112342s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-stock-take-sink/production, 
instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-supplier-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-supplier-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437b08} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-supplier-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383117985s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-supplier-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-inventory-supplier-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437b70} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437ba8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383123138s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} 
{Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437c40} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437c70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383128378s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-organisations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437cd0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437d08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383141245s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437da0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383147102s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaypatientphoto-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaysubmission-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaysubmission-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437e88} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaysubmission-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc033437e50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383156512s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaysubmission-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-healthpaysubmission-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-payment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-payment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437f68} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-payment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437f30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383163915s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-payment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-payment-payment-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc033437ff0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383170888s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420810} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383178948s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-administration-schedules-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0064208a8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0064208e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383184581s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} 
{Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420990} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0064209e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383190461s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescription-dispensations-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420a88} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420a50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383194739s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420af0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420b38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383198525s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-prescriptions-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420bc8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420bf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383203438s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420c60} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383208032s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-roles-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420d18} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420d58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383211962s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, 
connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420dc8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420e00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38321691s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-ui-defaults-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420f10} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006420ea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383223122s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420f70} B:{Var:B 
Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006420fa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383230374s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=gcs-nigeria-users-sink/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421040} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383236702s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0064210a0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-conditions-source/production, 
instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0064210d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383245092s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421168} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421138}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383251741s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421208} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:4.383257765s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0064212a8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0064212d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383263747s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421338} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383270335s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0064213d8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38329326s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-kenya-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421778} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383301437s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 
], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-clinics-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc0064217e0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383309135s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-conditions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421a28} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc0064219f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383315936s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-consultations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421a90} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421ac8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.3833229s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-diseases-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421b30} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421b60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383328927s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-organisations-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-organisations-source/production, 
instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421bd0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383334637s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-roles-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421c60} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383339365s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=mongo-nigeria-users-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421d00} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421d38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383344205s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421dd0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc006421da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383348177s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-payment-source/production, 
instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421e30} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc006421e68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383352008s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a038} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383356005s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc00f79a0a0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc00f79a0d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383361142s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-kenya-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a140} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38336477s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-encounter-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, 
cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc00f79a1d0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc00f79a208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383369658s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-inventory-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a280} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a2b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383373678s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-payment-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-prescriptions-source/production, 
instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc00f79a320} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect Value:0xc00f79a350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383378858s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-prescriptions-source/production, instance=amazon-eu-west-1-rah3ee/10.10.159.147:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a3b8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect Value:0xc00f79a3f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383384202s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=amazon-eu-west-1-rah3ee/data-platform-kafka/shared-production, connector=postgres-nigeria-ui-defaults-source/production, instance=amazon-eu-west-1-rah3ee/10.10.137.146:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-anc-encounter-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-anc-encounter-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a458} B:{Var:B 
Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-anc-encounter-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383387532s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-anc-encounter-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-anc-encounter-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-clinics-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-clinics-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a530} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-clinics-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a4f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383392817s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-clinics-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-clinics-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-conditions-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-conditions-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a598} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-conditions-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect 
Value:0xc00f79a5d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38339772s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-conditions-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-conditions-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-consultations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-consultations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a668} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-consultations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383402112s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-consultations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-consultations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-diseases-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-diseases-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a6d0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-diseases-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79a710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383406174s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, 
connector=gcs-pakistan-diseases-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-diseases-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-dispense-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-dispense-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79a778} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-dispense-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79a800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383410562s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-dispense-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-dispense-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-lot-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-lot-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79aa80} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-lot-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79aa48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383415964s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-lot-modification-sink/production, 
instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-lot-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-move-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-move-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79ab28} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-move-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79aaf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383420605s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-move-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-move-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-receive-lot-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-receive-lot-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79ab88} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-receive-lot-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79ac70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383425314s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-receive-lot-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, 
job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-receive-lot-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79ace0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79ad10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383430117s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79ad80} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79adb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383433793s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' 
labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79ae20} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79ae58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383438156s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-audit-event-sku-modification-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-idempotency-key-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-idempotency-key-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79aec0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-idempotency-key-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79aef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383442691s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-idempotency-key-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' 
labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-idempotency-key-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79afd0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.38344651s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-lot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-fulfillment-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-fulfillment-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b068} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-fulfillment-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383450592s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-fulfillment-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, 
connector=gcs-pakistan-inventory-move-request-fulfillment-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79b1c8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79b198}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383455565s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b230} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383460072s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-move-request-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} 
value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b2d0} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383465382s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-external-reference-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b368} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b3b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383469625s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, 
connector=gcs-pakistan-inventory-sku-translation-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-translation-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b420} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-translation-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383474737s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-translation-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-sku-translation-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-addition-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-addition-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b500} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-addition-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383480021s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-addition-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-addition-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-snapshot-sink/production, 
instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-snapshot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b570} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-snapshot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b5a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383484415s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-snapshot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-snapshot-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-update-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-update-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b640} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-update-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383489755s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-update-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-lot-update-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect State:Normal Error: 
Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b6b8} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect Value:0xc00f79b6f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383494142s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-stock-take-sink/production, instance=rapidcompute-khi04/10.100.1.63:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-supplier-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-supplier-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79b768} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-supplier-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79b7e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.383498665s EvaluationString:[ var='A' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-supplier-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ], [ var='B' labels={__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-inventory-supplier-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect} value=0 ]} {Instance:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-organisations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, 
connector=gcs-pakistan-organisations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integrations/kafka-connect Value:0xc00f79b848} B:{Var:B Labels:__name__=kafka_connect_connect_worker_metrics_connector_failed_task_count, cluster=rapidcompute-khi04/data-platform-kafka/shared-production, connector=gcs-pakistan-organisations-sink/production, instance=rapidcompute-khi04/10.100.104.38:9404, job=integ + level=debug ts=2024-05-29T13:44:14.394199779Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:14.394038175Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.363149ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6pjcck5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393975821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6pjcck5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39392244Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6pjcck5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393859389Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6neqgu1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393828109Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6neqgu1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393802709Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=MmGWRG74k, ref_id=A" t=2024-05-29T13:44:14.393830078Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=430961 slug=solifi version=2 
fingerprint=310ae8e4157a23f2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.393752199Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=MmGWRG74k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.393453728s EvaluationString:}]" duration=39.530463ms + level=debug ts=2024-05-29T13:44:14.393723559Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6neqgu1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393722388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6n0mp30-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393670537Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6n0mp30-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393602787Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6gl5oxn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393153052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6gl5oxn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393111752Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.393075825Z caller=ruler.go:522 msg="tenant is owned by this instance" user=716307 slug=whoyouprod groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6fswuk4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.393021541Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6fswuk4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39294626Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.392910607Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.392825453Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.392747584Z caller=remote_alert_sender.go:94 user=538037 slug=drivewealth host=drivewealth-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.117.25:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddiv8oezcuy2oe alerts=1 + level=debug ts=2024-05-29T13:44:14.392703733Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.392699958Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6fkb5o2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392715848Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6fkb5o2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392689547Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6fkb5o2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392646977Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6fkb5o2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392615227Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6e3c21b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392540886Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.392264589Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.392107926Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.39224109Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:14.392199517Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=545401 slug=wouterraateland + level=debug ts=2024-05-29T13:44:14.392112459Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6at8w7e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392157162Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6at8w7e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392121911Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v6at8w7e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.392094651Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.392046175Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v69f7uc8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39198422Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v69f7uc8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391901489Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v665nonw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391808608Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.391713312Z caller=client.go:80 msg="creating client for grafana instance" user=642049 addr=dns:///ziron-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.391679012Z caller=ruler.go:522 msg="tenant is owned by this instance" user=532641 slug=vozovoz groups=1 + level=debug ts=2024-05-29T13:44:14.391669593Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v665nonw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391661527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v665nonw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391628126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v60ccm22-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391567526Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.391391509Z caller=client.go:80 msg="creating client for grafana instance" user=514312 addr=dns:///zephyrde-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.391274785Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.390285299Z caller=ruler.go:522 msg="tenant is owned by this instance" user=723203 slug=vmanzoni groups=0 + level=debug ts=2024-05-29T13:44:14.391302109Z caller=ruler.go:522 msg="tenant is owned by this 
instance" user=791033 slug=vanoord groups=3 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5ydamdt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391254003Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.391197208Z caller=ruler.go:522 msg="tenant is owned by this instance" user=545290 slug=webstter groups=0 + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:14.391212462Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=23.551497ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5ydamdt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391162662Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5ydamdt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.391120981Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.391112018Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.390987006Z caller=ruler.go:522 msg="tenant is owned by this instance" user=523289 slug=ueftipafree groups=6 + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=spectrum-cable, exported_endpoint=predict_intents" t=2024-05-29T13:44:14.391050523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5jfe1e0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39098212Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.390177298Z caller=ruler.go:522 msg="tenant is owned by this instance" user=606300 slug=tototon1x2 groups=1 + level=debug ts=2024-05-29T13:44:14.39082868Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5jfe1e0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.390901589Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.390829304Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=502556 slug=varunpalekar003 + level=debug ts=2024-05-29T13:44:14.390542222Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.390805388Z caller=remote_alert_sender.go:94 user=120621 slug=jdall host=jdall-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.99.132:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=j6vMnK_7k alerts=1 + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless, exported_endpoint=predict_intents" t=2024-05-29T13:44:14.390823607Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.390670103Z caller=client.go:80 msg="creating client for grafana instance" user=753269 addr=dns:///yellovoidmtr-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5iiy6hj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.390688017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5iiy6hj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.390571186Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.390495701Z caller=ruler.go:522 msg="tenant is owned by this instance" user=660171 slug=timnijenhuis11 groups=0 + level=debug ts=2024-05-29T13:44:14.390587276Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.390534722Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.390528885Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=371756 slug=asapp version=13 fingerprint=896777ad08b6166b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.390378375Z level=debug msg="Alert rule evaluated" results="[{Instance:company_marker=american-airlines, exported_endpoint=predict_intents State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, exported_endpoint=predict_intents Value:0xc00645d4e0} C:{Var:C Labels:company_marker=american-airlines, exported_endpoint=predict_intents Value:0xc00645d4e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.389937273s EvaluationString:[ var='B' labels={company_marker=american-airlines, exported_endpoint=predict_intents} value=2409.900592992651 ], [ var='C' labels={company_marker=american-airlines, 
exported_endpoint=predict_intents} value=0 ]} {Instance:company_marker=assuranthousing, exported_endpoint=predict_intents State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assuranthousing, exported_endpoint=predict_intents Value:0xc00645d548} C:{Var:C Labels:company_marker=assuranthousing, exported_endpoint=predict_intents Value:0xc00645d5b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.389961849s EvaluationString:[ var='B' labels={company_marker=assuranthousing, exported_endpoint=predict_intents} value=547.5205213532962 ], [ var='C' labels={company_marker=assuranthousing, exported_endpoint=predict_intents} value=0 ]} {Instance:company_marker=dishwireless, exported_endpoint=predict_intents State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishwireless, exported_endpoint=predict_intents Value:0xc00645d620} C:{Var:C Labels:company_marker=dishwireless, exported_endpoint=predict_intents Value:0xc00645d628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.389969378s EvaluationString:[ var='B' labels={company_marker=dishwireless, exported_endpoint=predict_intents} value=295.1900284656288 ], [ var='C' labels={company_marker=dishwireless, exported_endpoint=predict_intents} value=0 ]} {Instance:company_marker=spectrum-cable, exported_endpoint=predict_intents State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=spectrum-cable, exported_endpoint=predict_intents Value:0xc00645d698} C:{Var:C Labels:company_marker=spectrum-cable, exported_endpoint=predict_intents Value:0xc00645d700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.389975464s EvaluationString:[ var='B' labels={company_marker=spectrum-cable, exported_endpoint=predict_intents} value=2.9761295366084015 ], [ var='C' labels={company_marker=spectrum-cable, exported_endpoint=predict_intents} value=0 ]}]" duration=82.527716ms + level=debug ts=2024-05-29T13:44:14.3903519Z caller=ruler.go:522 msg="tenant is owned by this instance" user=739272 slug=svprojects groups=0 + level=debug ts=2024-05-29T13:44:14.390367848Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.390451485Z caller=remote_instance_store.go:51 user=497819 slug=fornybar msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v59dqihq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.390458204Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-keycloak-pool-c5a18aa7-zuvk" t=2024-05-29T13:44:14.390371164Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5939rma-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.390342633Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v5939rma-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.390290143Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.390244899Z caller=client.go:80 msg="creating client for grafana instance" user=867755 addr=dns:///xgenpreprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-keycloak-pool-8b27eb97-gujq" t=2024-05-29T13:44:14.390261713Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v592r3g2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.390185892Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.390167998Z caller=ruler.go:522 msg="tenant is owned by this instance" user=690880 slug=vica groups=0 + level=debug ts=2024-05-29T13:44:14.390135889Z caller=remote_instance_store.go:51 user=530352 slug=mtse msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=257565 slug=eddyson instance="ApplicationShortName=comm, BusinessService=eddyson yOD Shared Cloud @ OEDIV, EnvironmentType=prod, ITService=ySE Communication Proxy, InfrastructureElement=ycomm04.eds.yod-sc-pro.oediv.ondemand.services, PhysicalHostname=web-eds-119-u.oediv.dmz, TenantShortName=shared, __name__=jvm_memory_bytes_used, appCtx=comm2-shared-prod, area=nonheap, instance=127.0.0.1:8890, job=metrics: comm2-shared-prod" t=2024-05-29T13:44:14.390111013Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=530352 slug=mtse instance="__name__=up, host=ubuntuprd1, instance=localhost:9100, job=nodeexporter" t=2024-05-29T13:44:14.390075497Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v592r3g2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39006219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v592r3g2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.39003883Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v55cqfjy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389982159Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=257565 slug=eddyson instance="ApplicationShortName=comm, BusinessService=eddyson yOD Shared Cloud @ OEDIV, EnvironmentType=prod, ITService=ySE Communication Proxy, InfrastructureElement=ycomm01.eds.yod-sc-pro.oediv.ondemand.services, PhysicalHostname=web-eds-119-u.oediv.dmz, TenantShortName=yodde01, __name__=jvm_memory_bytes_used, appCtx=comm-yodde01-prod, area=nonheap, instance=127.0.0.1:8888, job=metrics: comm-yodde01-prod" t=2024-05-29T13:44:14.389981369Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4yepeyg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389854878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4yepeyg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389832768Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4yepeyg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389744927Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4r5g20q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389705857Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.389686573Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-v4r5g20q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389676896Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.389699583Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.389523066Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-axon-pool-9fc2711c-4by4" t=2024-05-29T13:44:14.38971138Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=257565 slug=eddyson instance="ApplicationShortName=comm, BusinessService=LIS yOP Private Cloud @ Arvato, EnvironmentType=prod, ITService=ySE Communication Proxy, InfrastructureElement=gtunxlvi04871.server.arvato-systems.de, PhysicalHostname=gtunxlvi04871.server.arvato-systems.de, TenantShortName=lis-ccdi, __name__=jvm_memory_bytes_used, appCtx=comm-lis-ccdi-prod, area=nonheap, instance=127.0.0.1:8888, job=metrics: comm-lis-ccdi-prod" t=2024-05-29T13:44:14.389617643Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4r5g20q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389598145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="event=cli.generate_change_records" t=2024-05-29T13:44:14.389574355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic instance="event=cli.generate_change_files" t=2024-05-29T13:44:14.3895027Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-apps-pool-f94bcc8b-zcwm" t=2024-05-29T13:44:14.389431878Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=257565 slug=eddyson instance="ApplicationShortName=comm, BusinessService=BAHAG yOP Private Cloud @ Arvato, EnvironmentType=prod, ITService=ySE Communication Proxy, InfrastructureElement=1-esb-com-prod.bahag.com, PhysicalHostname=1-esb-com-prod.bahag.com, TenantShortName=bhg-esbext, __name__=jvm_memory_bytes_used, appCtx=comm-int-bhg-esbext-prod, area=nonheap, instance=10.49.206.129:60011, job=metrics: comm-int-bhg-esbext-prod" t=2024-05-29T13:44:14.389393628Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=257565 slug=eddyson instance="ApplicationShortName=comm, BusinessService=BAHAG yOP Private Cloud @ Arvato, EnvironmentType=prod, ITService=ySE Communication Proxy, InfrastructureElement=1-esb-com-prod.bahag.com, PhysicalHostname=1-esb-com-prod.bahag.com, TenantShortName=bhg-esbext, __name__=jvm_memory_bytes_used, appCtx=comm-int-bhg-esbext-prod, area=nonheap, instance=10.49.206.129:60011, job=metrics: comm-int-bhg-esbext-prod" t=2024-05-29T13:44:14.389373732Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.389364995Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4f3027z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389280072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4f3027z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.389242632Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-apps-pool-f94bcc8b-tt6l" t=2024-05-29T13:44:14.389283865Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.389252881Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=257565 slug=eddyson instance="ApplicationShortName=comm, BusinessService=BAHAG yOD Private Cloud @ OEDIV, EnvironmentType=prod, ITService=ySE Communication Proxy, InfrastructureElement=ycomm01.bhg.yod-pc-pro.oediv.ondemand.services, PhysicalHostname=pfs-eds-149-u.oediv.dmz, TenantShortName=bhg, __name__=jvm_memory_bytes_used, appCtx=comm-bhg-prod, area=nonheap, instance=127.0.0.1:8888, job=metrics: comm-bhg-prod" t=2024-05-29T13:44:14.389083212Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod t=2024-05-29T13:44:14.389063861Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v4f3027z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.38906253Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.389015855Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.3890072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:14.388979682Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.388971016Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.388930577Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.388942826Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=163215 slug=tripadvisor instance= t=2024-05-29T13:44:14.388886715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163215 slug=tripadvisor t=2024-05-29T13:44:14.3888441Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v48hbjn4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.388823607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=163215 slug=tripadvisor version=3 fingerprint=b83986c9beac80fd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.388791101Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc041b91c38} C:{Var:C Labels: Value:0xc041b91c40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.388453542s EvaluationString:[ var='B' labels={} value=0.8875354432213322 ], [ var='C' labels={} value=0 ]}]" duration=842.113891ms + logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:14.388740921Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=163215 slug=tripadvisor instance="__shard=0, dc=mz, exported_job=claims-v2-autofile, instance=jnk04n:443, job=jenkins, owner=dev-ops, zone=mz" t=2024-05-29T13:44:14.388725445Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.388703219Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.388587767Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.388579753Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v42zvwqf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.388531564Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-apps-pool-e8549c6d-kso9" t=2024-05-29T13:44:14.38853326Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.388493128Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.388405903Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v42bjo8l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.388478854Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v42bjo8l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.388317692Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.388281748Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.388242391Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v426q4nw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.388280862Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v426q4nw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.388254472Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.388154758Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-apps-pool-3f67f659-vkjf" t=2024-05-29T13:44:14.388081513Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.387866099Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=325783 slug=bloxprod instance="kubernetes_io_hostname=gke-blox-prod-cluster-apps-pool-3f67f659-otym" t=2024-05-29T13:44:14.387903402Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.387161319Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3y2pxyc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.387772077Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3y2pxyc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.387632205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3y2pxyc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.387540194Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=325783 slug=bloxprod t=2024-05-29T13:44:14.387423758Z level=debug msg="State manager processing evaluation results" resultCount=21 + level=debug ts=2024-05-29T13:44:14.387075758Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3xclrh2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.387422533Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3wfe3t0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.387246191Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=home-claims-worker, cluster=lmnd-staging-us-east-1, container=kube-state-metrics, deployment=home-claims-worker, endpoint=http, instance=10.24.74.71:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-t4kxx, region=us-east-1, service=kube-state-metrics, stage=staging" t=2024-05-29T13:44:14.38725555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3v6xcz1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386980118Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=337710 slug=metasky t=2024-05-29T13:44:14.386867396Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.696587ms + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3v6xcz1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386893857Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.386919707Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3r343jx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386806887Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.386862518Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.386636537Z caller=remote_instance_store.go:51 user=202755 slug=iwmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3r343jx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386643595Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.386653042Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.386638235Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:14.386577088Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3gxowap-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386537384Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.386438724Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3gxowap-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386466433Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.386416999Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.386390658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.386383433Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.38637344Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3gojk5h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386412102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3gojk5h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386387942Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.386264979Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3gojk5h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386222861Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.386209263Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3dig1i1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386106219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v3dig1i1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.386004398Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.385965821Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.385744866Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v36i49fy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385735696Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.385731203Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v36i49fy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385679395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v36i49fy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385669775Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.385628341Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.385511943Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2rcqpav-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385529953Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2rcqpav-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385489433Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:14.385400559Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=115.249252ms + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2rcqpav-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385458413Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.385460774Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2rcqpav-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385415582Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.385367379Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2ra5t8b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385287151Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.38526372Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2ra5t8b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.38521402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2nvl72h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385068899Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2nvl72h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.385024208Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=214309 slug=spenmo version=228 fingerprint=c7c28df587807b5b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.385015377Z level=debug 
msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A,C State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.384662237s EvaluationString:}]" duration=30.918922ms + level=debug ts=2024-05-29T13:44:14.384923715Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2n0w99n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384885217Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2n0w99n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384845166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2n0w99n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384780976Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2iewq4w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384717195Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2i7aejz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384480803Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2i7aejz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384428712Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2i7aejz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384377282Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.384329757Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.384213256Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2heczy0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384270461Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2heczy0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.38419249Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2heczy0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384152329Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2heczy0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384139289Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2745duy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384032988Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v2745duy-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.384018888Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1nk1exi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383879537Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1nk1exi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383809246Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.383767943Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b148bbeb-13f0-4a06-a31a-403f03de4bf6 alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1nk1exi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383733455Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1kp0plm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383625674Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.383672225Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.041427ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1kp0plm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383579213Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=916151 slug=hwbluepd t=2024-05-29T13:44:14.383493498Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.01487ms + level=debug ts=2024-05-29T13:44:14.383388153Z caller=remote_instance_store.go:51 user=482906 
slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1fbcj15-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383379741Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1fbcj15-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383344971Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.383300526Z level=debug msg="State manager processing evaluation results" resultCount=6 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1cw9rw4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383302341Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.383245038Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1cw9rw4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.38327813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v1cw9rw4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383125079Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.383064252Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v16d9os8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.383009788Z level=debug msg="Keeping state" state=Normal + level=debug 
ts=2024-05-29T13:44:14.382994405Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.382791521Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v16d9os8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.382704614Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.38273213Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.382635546Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.382555529Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v16d9os8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.382327631Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v13wr88a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.382188009Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.382215648Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v13wr88a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.382110098Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0v3vd3w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381942817Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-v0v3vd3w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381877836Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0uk212q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381785615Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0uk212q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381775865Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.381709928Z caller=remote_instance_store.go:51 user=313382 slug=hyai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.381727567Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0kzlsld-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381455132Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=168155 slug=connexcs t=2024-05-29T13:44:14.381362039Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0kkalk9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381364441Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.381332583Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=168155 slug=connexcs instance= t=2024-05-29T13:44:14.381346452Z level=warn msg="Failed to take an image" dashboard=BgmnHTbZk panel=26 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0gdollp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.38128318Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0gdollp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.3812716Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0gdollp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381187899Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-v0gdollp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.381178049Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uzy3le6j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380910556Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.3809951Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.380889928Z caller=remote_instance_store.go:51 user=517596 slug=datar msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uztah8oj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380843485Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uztah8oj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380822085Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.380819462Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uztah8oj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380711864Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.380644301Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=168155 slug=connexcs instance= t=2024-05-29T13:44:14.38062566Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=168155 slug=connexcs t=2024-05-29T13:44:14.380594171Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=168155 slug=connexcs version=1 fingerprint=f1e3a3c5a85333e2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.380520291Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels:aggregatedBy=average, name=de2js4 A Value:} B1:{Var:B Labels:aggregatedBy=average, name=de2js5 A Value:} B2:{Var:B Labels:aggregatedBy=average, name=de2js6 A Value:} B3:{Var:B Labels:aggregatedBy=average, name=de2js7 A Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.380144847s EvaluationString:[ var='B0' metric='A' labels={aggregatedBy=average, name=de2js4 A} value=null ], [ var='B1' metric='A' labels={aggregatedBy=average, name=de2js5 A} value=null ], [ var='B2' metric='A' labels={aggregatedBy=average, name=de2js6 A} value=null ], [ var='B3' metric='A' labels={aggregatedBy=average, name=de2js7 A} value=null ]}]" duration=117.210935ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uzmmtnmn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380458931Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uzmmtnmn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380422611Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.380315746Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uzlymnrx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380367611Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.380294451Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uzlymnrx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380258159Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.380244554Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.380113335Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=813270 slug=adiante t=2024-05-29T13:44:14.379866292Z level=debug msg="Saving alert states" count=24 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379849472Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379824261Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379820421Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uzaz8c0v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.380085238Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.380056474Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.38001095Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379772621Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.37994937Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=633335 slug=promqlworkshop t=2024-05-29T13:44:14.379974762Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=51.273247ms
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379764531Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379759731Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.379901975Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.37974618Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.37973841Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.379861227Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.37972113Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.37985814Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.379875937Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.37969484Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.37969117Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.37967914Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379655529Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379608079Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uz434x9e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379784685Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379574338Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uz096mg0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379754514Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.379688093Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379535608Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uz096mg0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379629073Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379513188Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379504048Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379487987Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.379616929Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyz84i92-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379597993Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyz84i92-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379572762Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=363785 slug=moonletmonitor t=2024-05-29T13:44:14.379520147Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=87.810579ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyz84i92-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379486131Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379442247Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="application=posttrade_grpc_api, data_type=application, environment=UAT, host=drivewealth-backoffice-instrument-grpc-api-4-74wnn, location=ROSA, url=http://127.0.0.1:3004/actuator/prometheus" t=2024-05-29T13:44:14.379519295Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:45:40Z next_ends_at=2024-05-29T13:46:10Z
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379427597Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379420717Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyyqygep-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379444731Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379412987Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379405106Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379391356Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyyqygep-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37937343Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=813270 slug=adiante instance="datasource_uid=caac8c14-1421-4300-8eb3-bb9d4df07615, ref_id=A" t=2024-05-29T13:44:14.379345806Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uywh5wjp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379278959Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.379148879Z caller=remote_alert_sender.go:94 user=440643 slug=avyprod host=avyprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.114.85:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=Rd1A0s74z alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uywh5wjp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379187288Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.379085937Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.379074508Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyuxlxsv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379073417Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.379033877Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyuxlxsv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379030117Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uynj5ist-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.379000587Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.379007823Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.378881186Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.846827ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uynj5ist-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.378876315Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.378835507Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyn56s8b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.378788214Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uyn56s8b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.378686163Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.378598138Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.378441045Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uymotl9b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.378458341Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=440643 slug=avyprod t=2024-05-29T13:44:14.37845557Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.9433ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uykji44y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37834094Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uykji44y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.378309939Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uykji44y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.378263349Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uycgh8u4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.378102357Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.378074857Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.377689449Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy7bu1cw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.377697223Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy7bu1cw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.377625182Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy7bu1cw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.377545472Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.377551306Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy5gzvpv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.377478831Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy5gzvpv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37738535Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.377366989Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.726501ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy5gzvpv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.377341449Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.377195251Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy2jisgb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.377158368Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.377105572Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy0akngw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.377099887Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.377074372Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.377027941Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.376904941Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy0akngw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376955736Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uy0akngw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376930465Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxxooyxh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376809594Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxxooyxh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376782064Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxxooyxh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376712063Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.376720076Z caller=remote_alert_sender.go:94 user=190917 slug=d1cx host=d1cx-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.156.48:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=dc4227eb-cec4-4a83-bc36-734def4e5781 alerts=1
+ logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:14.376648351Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.450643ms
+ logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network" t=2024-05-29T13:44:14.37660122Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.376528996Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network" t=2024-05-29T13:44:14.376529372Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxry8k4i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37642776Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.376425276Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g4.coreSlave*.usersessions.status.fifa-2023-ps4.GaugeUS_gcp-grq_Slave,5)) Query" t=2024-05-29T13:44:14.376410054Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-raw-voiceai.algorithms.swearing-4.0.2-regex" t=2024-05-29T13:44:14.376316795Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-raw-voiceai.algorithms.swearing-4.0.2-regex" t=2024-05-29T13:44:14.376305353Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.376286815Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxry8k4i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376253028Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-raw-voiceai.algorithms.pii_scrubber-1.0.2-regex" t=2024-05-29T13:44:14.376239277Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxog9mql-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376180618Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.376202283Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-raw-voiceai.algorithms.multilingual_sentiment-3.0.2-chat" t=2024-05-29T13:44:14.376177598Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxog9mql-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.376136067Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-raw-voiceai.algorithms.call_recording-3.0.2-regex" t=2024-05-29T13:44:14.376112147Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.376158523Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.376096289Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.376054216Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-partial-voiceai.algorithms.real_time_assist-5.1.1-es" t=2024-05-29T13:44:14.37599591Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=aa42a25838fa61be attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.375995926Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.375806115s EvaluationString:}]" duration=198.747617ms
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-partial-voiceai.algorithms.real_time_assist-5.1.1-en" t=2024-05-29T13:44:14.375963605Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.375964445Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxnblve6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.375947005Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:14.375909226Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.vertex-2.0.1-session_purpose" t=2024-05-29T13:44:14.375928042Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://websocket.wallet.testedge2.haqq.network" t=2024-05-29T13:44:14.375835276Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.375905872Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.unsure_agent-2.0.3-regex" t=2024-05-29T13:44:14.375893515Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=114286 slug=enverus instance="datasource_uid=lzFWNTdGk, ref_id=A" t=2024-05-29T13:44:14.375866652Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.375817465Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxnblve6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.375729343Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.375745526Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.historian backend=loki user=916144 slug=cmjjilpd t=2024-05-29T13:44:14.375717295Z level=debug msg="Done saving alert state history batch"
+ level=debug ts=2024-05-29T13:44:14.375641423Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.questions-7.0.2-chat" t=2024-05-29T13:44:14.375704043Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.375673028Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.pii-1.0.1-voice" t=2024-05-29T13:44:14.375681741Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxmnw1k4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.375603982Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health" t=2024-05-29T13:44:14.375476692Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.information_request-1.0.1-nn" t=2024-05-29T13:44:14.375469951Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.information_request-1.0.1-nn" t=2024-05-29T13:44:14.375459722Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.375422271Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.chunking-8.0.2-call_purpose" t=2024-05-29T13:44:14.375436993Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.chapterization-1.0.2-regex" t=2024-05-29T13:44:14.375396981Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.chapterization-1.0.2-regex" t=2024-05-29T13:44:14.375385334Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxmape11-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.375320169Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.cancellation-2.0.2-chat" t=2024-05-29T13:44:14.375366049Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.cancellation-2.0.2-chat" t=2024-05-29T13:44:14.375354736Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.ai_scorecard-1.0.2-ai_scorecard_es" t=2024-05-29T13:44:14.375295219Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.375197453Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.ai_playbooks-5.2.1-vertex_playbooks_test" t=2024-05-29T13:44:14.37519274Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.ai_playbooks-5.2.1-vertex_playbooks_test" t=2024-05-29T13:44:14.375181943Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.ai_playbooks-5.1.1-vertex_playbooks" t=2024-05-29T13:44:14.375148932Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxlu5yd3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.375100657Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxlu5yd3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.375080306Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-realtime-final-voiceai.algorithms.action_item-14.0.2-chat" t=2024-05-29T13:44:14.375083086Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.375034649Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=voiceai-staging, project_id=voiceai-staging, subscription_id=tastes-call-end-voiceai.algorithms.quality_calls-0.0.3-quality_call-v0-0-3" t=2024-05-29T13:44:14.375049113Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxll4v61-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374978535Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxll4v61-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374897324Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-raw-voiceai.algorithms.swearing-4.0.2-regex" t=2024-05-29T13:44:14.374859442Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-raw-voiceai.algorithms.sentiment-8.0.3-keras" t=2024-05-29T13:44:14.374801652Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-raw-voiceai.algorithms.pii_scrubber-1.0.2-regex" t=2024-05-29T13:44:14.374770439Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxl0aoai-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374693462Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxl0aoai-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374668482Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-raw-voiceai.algorithms.competitor_extraction-3.0.2-competitor" t=2024-05-29T13:44:14.374672641Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-raw-voiceai.algorithms.competitor_extraction-3.0.2-competitor" t=2024-05-29T13:44:14.374657529Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uxdq4zsw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374555471Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-raw-voiceai.algorithms.call_recording-3.0.2-regex" t=2024-05-29T13:44:14.374622713Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.374417461Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-raw-voiceai.algorithms.call_metrics-4.0.1-incall" t=2024-05-29T13:44:14.374591079Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-partial-voiceai.algorithms.real_time_assist-5.1.1-es" t=2024-05-29T13:44:14.374527469Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-partial-voiceai.algorithms.real_time_assist-5.1.1-en" t=2024-05-29T13:44:14.374502609Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.374445205Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ux9ton82-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37443286Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ux9ton82-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37442116Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.vertex-2.0.1-session_purpose" t=2024-05-29T13:44:14.374457078Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ux9ton82-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374364569Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.topic_clustering-2.0.2-topics" t=2024-05-29T13:44:14.374397336Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.support_defective-4.0.2-voice" t=2024-05-29T13:44:14.374352262Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ux9ton82-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374299458Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.questions-7.0.2-voice" t=2024-05-29T13:44:14.374288324Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ux3xymex-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374231848Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.pii-1.0.1-voice" t=2024-05-29T13:44:14.374220547Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ux3xymex-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374162577Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.pcsat-1.19.2-pcsat" t=2024-05-29T13:44:14.374167667Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.pcsat-1.19.2-pcsat" t=2024-05-29T13:44:14.374155748Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ux3xymex-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374097116Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.issue_resolution-3.0.2-chat" t=2024-05-29T13:44:14.37406162Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.issue_resolution-3.0.2-chat" t=2024-05-29T13:44:14.37405152Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.information_request-1.0.1-nn" t=2024-05-29T13:44:14.374026778Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.information_request-1.0.1-nn" t=2024-05-29T13:44:14.37401321Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwxa51f1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.374012895Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.chunking-8.0.2-call_purpose" t=2024-05-29T13:44:14.373991986Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwxa51f1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373998215Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.chunking-8.0.2-call_purpose" t=2024-05-29T13:44:14.37398099Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.chapterization-1.0.2-regex" t=2024-05-29T13:44:14.373959379Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwu8onp0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373867754Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.ai_scorecard-1.0.2-ai_scorecard_es" t=2024-05-29T13:44:14.373826043Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-realtime-final-voiceai.algorithms.action_item-14.0.2-chat" t=2024-05-29T13:44:14.37355379Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-call-end-voiceai.algorithms.quality_calls-0.0.3-quality_call-v0-0-3" t=2024-05-29T13:44:14.373503108Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwrprhz1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373676212Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-call-end-voiceai.algorithms.dialpad_gpt-2.1.2-summarization_actionitem" t=2024-05-29T13:44:14.373468293Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon,
subscription_id=tastes-call-end-voiceai.algorithms.dialpad_gpt-2.1.2-call_purpose" t=2024-05-29T13:44:14.3734418Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.373655364Z caller=remote_image_capturer.go:33 user=163513 slug=dialpad rule_org_id=1 rule_uid=ddcuy2tnz124gf msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwrprhz1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373612611Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=163513 slug=dialpad instance="project=talkiq-echelon, project_id=talkiq-echelon, subscription_id=tastes-call-end-voiceai.algorithms.dialpad_gpt-2.1.2-call_purpose" t=2024-05-29T13:44:14.373427133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwrprhz1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373556901Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=174403 slug=indeez t=2024-05-29T13:44:14.373368498Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwqwlboc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37347315Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwqwlboc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37344133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwqvdvp9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373397419Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.373200668Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.373216512Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwot2y2i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373267118Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwot2y2i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373246098Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwot2y2i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373216937Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.373177717Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwn0euxp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373098966Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.373066934Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwn0euxp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.373021155Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network" t=2024-05-29T13:44:14.373048776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-uwg7jv94-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372887164Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network" t=2024-05-29T13:44:14.372900573Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.372896101Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.244.39:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdbhsq1zhid4wd alerts=1 + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=sda1, fstype=ext4, host=web1.de.fra.ovh.opsucht.cc, mode=rw, path=/" t=2024-05-29T13:44:14.372806131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwg7jv94-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372827133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwbi2trl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372787453Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwbi2trl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372760953Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=35223 slug=silkroad t=2024-05-29T13:44:14.372780349Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.112947ms + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.372805761Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=42.496838ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwbi2trl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372689862Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=sda1, fstype=ext4, host=sinusbot.iwmedia.dev, mode=rw, path=/" t=2024-05-29T13:44:14.372744396Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.372712198Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwaakez4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.3725525Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=sda1, fstype=ext4, host=influxdb.iwmedia.dev, mode=rw, path=/" t=2024-05-29T13:44:14.372706088Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uwaakez4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37252226Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=ob1.fra.de.ovh.opsucht.cc, mode=rw, path=/" t=2024-05-29T13:44:14.372665302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=C" t=2024-05-29T13:44:14.372615644Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=C" t=2024-05-29T13:44:14.37260652Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=22398 slug=sunfolding t=2024-05-29T13:44:14.372577884Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network" t=2024-05-29T13:44:14.372619651Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.historian backend=loki user=916144 slug=cmjjilpd t=2024-05-29T13:44:14.372591694Z level=debug msg="Alert state changed creating annotation" newState=Normal oldState=Alerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uw5ppe8e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372349378Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=mc07.opsucht.iwmedia.ovh, mode=rw, path=/" t=2024-05-29T13:44:14.372528611Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=6c59cea9a9cfa2ed attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.372523335Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=C State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.372226871s EvaluationString:}]" duration=19.024722ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uw5ppe8e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372275688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network" t=2024-05-29T13:44:14.372551018Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=916144 slug=cmjjilpd t=2024-05-29T13:44:14.372483751Z level=debug msg="Saving alert states done" count=100 max_state_save_concurrency=1 duration=1.526415635s + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=mc04.opsucht.iwmedia.ovh, mode=rw, path=/" t=2024-05-29T13:44:14.372462293Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=mc03.opsucht.iwmedia.ovh, mode=rw, path=/" t=2024-05-29T13:44:14.372417857Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.37241243Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uw4d2o3p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372157576Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uw4d2o3p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.372081786Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=120621 slug=jdall version=1 fingerprint=23f3ac338267c03f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.372329012Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=xfavlxVMk, 
ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.37210514s EvaluationString:}]" duration=143.310865ms + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=mc01.opsucht.iwmedia.ovh, mode=rw, path=/" t=2024-05-29T13:44:14.372340688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uvk37qr0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.371795993Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uvk37qr0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.371766882Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=db3.fra.de.ovh.opsucht.cc, mode=rw, path=/" t=2024-05-29T13:44:14.372280313Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uv59apmt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.371430279Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=db1.fra.de.ovh.opsucht.cc, mode=rw, path=/" t=2024-05-29T13:44:14.37221105Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=202755 slug=iwmedia instance="device=md2, fstype=ext4, host=db01.opsucht.iwmedia.ovh, mode=rw, path=/" t=2024-05-29T13:44:14.372121897Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.371461975Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=48.700123ms + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.371447011Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=43.267021ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuz4jla2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.371322728Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.371311874Z 
caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhspydhslc0b alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuz4jla2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.371290507Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network" t=2024-05-29T13:44:14.370184033Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuz4jla2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.371153856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuxhwtbx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.371054765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuv58cp6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.370694111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuic4tjx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.37053184Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuic4tjx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.370487139Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uuic4tjx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.370387468Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu82xc9n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.370356738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu82xc9n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.370207036Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.370942497Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu7u9csq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.370169156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu7u9csq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.370133656Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu7u9csq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.370054295Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-uu6fqjf6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.369933853Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu6fqjf6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.369798272Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu5g4l3z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.36957509Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu4ob3ms-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.369330657Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu4nz4v1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.369254566Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:14.370669314Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu4nz4v1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.369090005Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu35tql8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368971733Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu35tql8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368915043Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:14.370572029Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=batchImportCleanUpJobCronJob alert" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu2cbsd6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368692711Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu2cbsd6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.36865456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu28frtj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368477978Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=253959 slug=roic instance="datasource_uid=wtXzQGN7k, ref_id=A" t=2024-05-29T13:44:14.3705078Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:46:00Z next_ends_at=2024-05-29T13:46:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu0816e9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368438918Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=253959 slug=roic instance="datasource_uid=wtXzQGN7k, ref_id=A" t=2024-05-29T13:44:14.370493771Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu0816e9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368379037Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu0816e9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368331457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=253959 slug=roic t=2024-05-29T13:44:14.370435368Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uu0816e9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368318767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utyhjfdp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368264286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utyhjfdp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368231606Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utyhjfdp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368217826Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utyhjfdp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368165565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-utxfccmr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.368022194Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utwt0qw3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367920023Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uto9f7xj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367723761Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utf10dab-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367569889Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utcr3edl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367519598Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-utcr3edl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367425967Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut6votfr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367383507Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut6votfr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367327096Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.370087455Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut5317kr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367245486Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=456850 slug=juniz instance="Langhaus_ID=3, TE_ID=5" t=2024-05-29T13:44:14.370155885Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut5317kr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367159395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut5317kr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367149985Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut49olun-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.367116954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut1wnoiq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.366945722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ut1wnoiq-termination-metadata-pv, phase=Released, 
[~120 lines of raw logfmt debug output from Grafana's ngalert alerting subsystem, added verbatim by this diff: scheduler "Alert rule evaluated" results, state-manager "Setting next state" / "Keeping state" transitions, remote_instance_store "calling SaveAlertInstance" calls, and remote_alert_sender "sending alerts to grafana" messages, all timestamped around 2024-05-29T13:44:14Z]
rule_org_id=1 rule_uid=b82c8265-188c-4129-a6fe-0ffb9c3784fb alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-umxvtd24-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.357306034Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-umxvtd24-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.357253623Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-umvu5j47-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.357180032Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-umvu5j47-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.357156832Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-umvu5j47-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.357102502Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-umvhqval-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.357016671Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.35689958Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-umoj3d2t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.356756098Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-umofuw47-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.356592976Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.356799053Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-um3gcj0v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.356403924Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-um3gcj0v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.356337554Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulveaqrh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.356204172Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulveaqrh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.356165562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulveaqrh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.356100681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uluxxkfi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355879239Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uluxxkfi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355823848Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulpzgurr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355726227Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.356547307Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulcigg4t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355444235Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulcigg4t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355389074Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulcigg4t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355360824Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulbqs7o7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355114781Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulbqs7o7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.355076001Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ulbqs7o7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.35503958Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.356272363Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ul6wgufb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.354723577Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ul62sdgu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.354655926Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ul62sdgu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.354509695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=334408 slug=voltagrid t=2024-05-29T13:44:14.356291075Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=334408 slug=voltagrid t=2024-05-29T13:44:14.356200641Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ul3nqae5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.354261942Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukzt2f4x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.354119671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukzt2f4x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.35400617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukzt2f4x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.353908529Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukzt2f4x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.353895989Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukyktyxp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.353846048Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uktutgej-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.353657596Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukhdvcaf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.353121421Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukhdvcaf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.35302952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ukeyvsu6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352915328Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uka8bneo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352875798Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uka8bneo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352788667Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk8ihbog-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352760457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk8ihbog-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352712256Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk81rug0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352633865Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk81rug0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352556315Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=321534 slug=atlasiq t=2024-05-29T13:44:14.35539371Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk3y2tmf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352529734Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk3y2tmf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352454614Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=397201 slug=zultys instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.355422559Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.355311742Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.355352866Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.35515295Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.355160133Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.35510751Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk07r2r6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352366593Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uk07r2r6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352329242Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ujvdxr93-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352264102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ujvdxr93-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352252682Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.354979463Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=ddlhkoddq0g74f msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.355080538Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ujvdxr93-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352213781Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.35489025Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.355017227Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.354975557Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uj6ssvft-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.35214758Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uj6ssvft-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.3521385Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.354981144Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.354983693Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uj5r8zur-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352033259Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uj5r8zur-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.352024379Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=vtex-punchout-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.354959493Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uj5r8zur-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351997829Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.354924488Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uiqadeib-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351959918Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.354894366Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.354909092Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=ddlhkoddq0g74f msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=users-api-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.35489896Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z 
next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uiqadeib-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351878708Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=users-api-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.35488464Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uib5t9rx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351672165Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uib5t9rx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351641005Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.354695311Z caller=remote_alert_sender.go:94 user=55491 slug=demandbase host=demandbase-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.145.226:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e31e2f14-0192-40f5-8610-d8f53327c7ef alerts=1 + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=punchout-cronjob-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.354679936Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.354642652Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhyuxvrt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351575694Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhwvhjsa-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351361332Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.354542672Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=ddlhkoddq0g74f msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhlvtmad-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.351253931Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.354414467Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.280172ms + logger=ngalert.state.manager.persist user=166705 slug=crossnokaye t=2024-05-29T13:44:14.354354359Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=42.141484ms + level=debug ts=2024-05-29T13:44:14.35437436Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=ddlhkoddq0g74f msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.354302177Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=ddlhkoddq0g74f msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=easy-core-api-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.354204286Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.354026136Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.351208651Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353963516Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=checkout-suscriber-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.35398193Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.353873065Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353929683Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353925649Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=ddlhkoddq0g74f msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, 
deployment=cencopim-punchout-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.353919688Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=catalog-generator-api-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.353812916Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=bucket-manager-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.353755255Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=bucket-manager-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.353735004Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.353644332Z caller=remote_image_capturer.go:33 user=656284 slug=cencosudx rule_org_id=1 rule_uid=ddlhkoddq0g74f msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:14.353632995Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353608825Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353553994Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=656284 slug=cencosudx instance="cluster=staging-1, deployment=api-checkout-dp, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-k8s-monitoring.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=ebisu" t=2024-05-29T13:44:14.353599831Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:14.353546758Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353565385Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=901230 slug=integromonitor t=2024-05-29T13:44:14.353541498Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.353528543Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.353522018Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.353486948Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=901230 slug=integromonitor 
t=2024-05-29T13:44:14.353486092Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.353246447Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353193129Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.353089338Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=127717 slug=engenoil t=2024-05-29T13:44:14.352988482Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.956875ms + logger=ngalert.state.manager user=125436 slug=caura instance="datasource_uid=grafanacloud-logs, ref_id=A,C" t=2024-05-29T13:44:14.352120972Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.352917621Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=125436 slug=caura instance="datasource_uid=grafanacloud-logs, ref_id=A,C" t=2024-05-29T13:44:14.352108166Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.352852779Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.35286166Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:14.352790888Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.352665336Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.352619408Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.352593934Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.352505104Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.714823ms + level=debug ts=2024-05-29T13:44:14.352455153Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.352199715Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.351271684Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.351224992Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.350955515Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.350830513Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.350774245Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhcaoa8c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.350695895Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhcaoa8c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.350654805Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.35065633Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.350703141Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhbgp32k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.350496893Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhbgp32k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.350471473Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uhbgp32k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.350427603Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=328453 slug=jitolabs t=2024-05-29T13:44:14.350328051Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.350188457Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.350178359Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=328453 slug=jitolabs version=1 fingerprint=26dbbe692f2c3a30 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.350178971Z level=debug msg="Alert rule evaluated" results="[{Instance:hostname=tokyo-mainnet-rpc-4, region=tokyo State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:hostname=tokyo-mainnet-rpc-4, 
region=tokyo Value:0xc010db9050} C:{Var:C Labels:hostname=tokyo-mainnet-rpc-4, region=tokyo Value:0xc010db9070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.34986767s EvaluationString:[ var='B' labels={hostname=tokyo-mainnet-rpc-4, region=tokyo} value=-1 ], [ var='C' labels={hostname=tokyo-mainnet-rpc-4, region=tokyo} value=0 ]}]" duration=134.143675ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uh89sf4z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.349948298Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugv0qzhv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.349835307Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugv0qzhv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.349805646Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugv0qzhv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.349737326Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.3496513Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugt7amvx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.349578234Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=13073be521408dbf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.349595687Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.349316396s EvaluationString:}]" duration=75.701071ms + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugt7amvx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.349459683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(eadp.gos.torch.prod.maverick-1-ps4.Gameplay_Users,5) Query" t=2024-05-29T13:44:14.349547298Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.349276026Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugqkk6q2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34916397Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugpkh38s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.349105979Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.349138334Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=381989 slug=vanoordacf instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.347057354Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugpkh38s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.348938867Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugp7zm0y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.348801306Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.348851339Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.348759966Z caller=remote_instance_store.go:51 user=874970 slug=nvidia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ugj8a4p3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.348438802Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.348678995Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.348515821Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ug8kijq6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.348354091Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.348378797Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ug8kijq6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.348284591Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ug5ws6yp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.348150869Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.348198223Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=313382 slug=hyai instance="metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor" t=2024-05-29T13:44:14.34807746Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.348103354Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ug5ws6yp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.348079219Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=313382 slug=hyai 
instance="metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables" t=2024-05-29T13:44:14.34798427Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ug0quef7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.347968247Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufxp6pg3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.347765885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufxp6pg3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.347688585Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufvciarv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.347544423Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufvciarv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.347413232Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.347239624Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=313382 slug=hyai instance="metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor" t=2024-05-29T13:44:14.34720278Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.347222651Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufrkn6z4-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34723557Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.347201373Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:14.347122071Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.346893406Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uflqhuio-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.347039258Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufledxry-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.346962607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=313382 slug=hyai version=29 fingerprint=1b51946c919a3706 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.346558243Z level=debug msg="Alert rule evaluated" results="[{Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor Value:0xc054cf4ac8} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor Value:0xc054cf4b30} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor Value:0xc054cf4b38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.3456601s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor} value=8.4629599999996e+06 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor} value=0.0084629599999996 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hytest-study-synthetic-monitor} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-doggerbankd-synthetic-monitor State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-doggerbankd-synthetic-monitor Value:0xc054cf4b88} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, 
resource.label.function_name=hyai-dev-doggerbankd-synthetic-monitor Value:0xc054cf4c30} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-doggerbankd-synthetic-monitor Value:0xc054cf4c38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345678347s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-doggerbankd-synthetic-monitor} value=6.540401000000203e+06 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-doggerbankd-synthetic-monitor} value=0.006540401000000203 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-doggerbankd-synthetic-monitor} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-update-core-tables State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-update-core-tables Value:0xc054cf4c98} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-update-core-tables Value:0xc054cf4d30} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-update-core-tables Value:0xc054cf4d38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345686945s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-update-core-tables} value=1.1859865906061115e+09 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-update-core-tables} value=1.1859865906061116 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-update-core-tables} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-synthetic-monitor State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-synthetic-monitor Value:0xc054cf4e70} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-synthetic-monitor Value:0xc054cf4de0} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-synthetic-monitor Value:0xc054cf4de8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345694565s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-synthetic-monitor} value=8.188179000000219e+06 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-synthetic-monitor} value=0.008188179000000219 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-whitelee-synthetic-monitor} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hyai4res-synthetic-monitor State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, 
resource.label.function_name=hyai-dev-hyai4res-synthetic-monitor Value:0xc054cf4ed0} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hyai4res-synthetic-monitor Value:0xc054cf4ed8} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hyai4res-synthetic-monitor Value:0xc054cf4f50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345702034s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hyai4res-synthetic-monitor} value=5.927419999999529e+06 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hyai4res-synthetic-monitor} value=0.005927419999999529 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-hyai4res-synthetic-monitor} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-update-core-tables State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-update-core-tables Value:0xc054cf4fb0} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-update-core-tables Value:0xc054cf4fb8} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-update-core-tables Value:0xc054cf5030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345709491s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-update-core-tables} value=4.2229783237499845e+08 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-update-core-tables} value=0.42229783237499846 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-update-core-tables} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables Value:0xc054cf5090} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables Value:0xc054cf5098} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables Value:0xc054cf5100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345716141s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables} value=2.7332468000000067e+07 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables} value=0.027332468000000068 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-update-core-tables} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor State:Normal Error: Results:map[] 
Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor Value:0xc054cf5150} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor Value:0xc054cf5158} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor Value:0xc054cf51c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345722321s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor} value=4.715361000000322e+06 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor} value=0.004715361000000322 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-kiwa-synthetic-monitor} value=0 ]} {Instance:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-synthetic-monitor State:Normal Error: Results:map[] Values:map[SLINanoseconds:{Var:SLINanoseconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-synthetic-monitor Value:0xc054cf5210} SLISeconds:{Var:SLISeconds Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-synthetic-monitor Value:0xc054cf5218} SLIThreshold:{Var:SLIThreshold Labels:metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-synthetic-monitor Value:0xc054cf5280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345728471s EvaluationString:[ var='SLINanoseconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-synthetic-monitor} value=5.362270000000081e+06 ], [ var='SLISeconds' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-synthetic-monitor} value=0.005362270000000081 ], [ var='SLIThreshold' labels={metric.name=value_execution_times_mean, resource.label.function_name=hyai-dev-emec-synthetic-monitor} value=0 ]}]" duration=155.632648ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufledxry-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.346845056Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uff3wwqq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.346517733Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.346365746Z caller=remote_instance_store.go:51 user=228733 slug=csmoney msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.346429334Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling 
SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.346411688Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=60603 slug=avalaratax version=1 fingerprint=87441edd0cbccce6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.346187602Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345812102s EvaluationString:}]" duration=102.82916ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufdqruh6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.346160699Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538355 slug=flogic instance="account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" t=2024-05-29T13:44:14.346072412Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538355 slug=flogic t=2024-05-29T13:44:14.346031888Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=538355 slug=flogic version=2 fingerprint=e3978dfba17b06bb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.345919608Z level=debug msg="Alert rule evaluated" results="[{Instance:account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc005679778} N:{Var:N Labels:account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc005679528} O:{Var:O Labels:account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc005679600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.345313046s EvaluationString:[ var='A' labels={account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, 
name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=39.77128028869629 ], [ var='N' labels={account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=39.77128028869629 ], [ var='O' labels={account_id=641264638977, dimension_DBInstanceIdentifier=amp-summary-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:amp-summary-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=0 ]}]" duration=11.647111ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ufc66fwp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.345955127Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.345859279Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uf6b8l9p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.345751415Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=765158 slug=stellarmenus instance="__name__=up, instance=grafana-prod, job=Menu-Tracker" t=2024-05-29T13:44:14.34566667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=765158 slug=stellarmenus t=2024-05-29T13:44:14.34563524Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.345599367Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uf4iek9w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.345548853Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uf4iek9w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.345513842Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uf4iek9w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.345448942Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.345447668Z caller=remote_instance_store.go:51 user=172772 slug=ppbtradingtribe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uf4772yo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34531753Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=172772 slug=ppbtradingtribe version=5 fingerprint=e88d4ec5dd37d100 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.345228601Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.344976685s EvaluationString:}]" duration=129.838198ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uf4772yo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.345210689Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.345198302Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.345102312Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uew7gia8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.345076928Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.344979508Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueohcag6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344800575Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueohcag6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344775275Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uemfixc0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344708854Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uemfixc0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344671174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uel0zo8q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344541302Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uel0zo8q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344512962Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uel0zo8q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344434441Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.344173493Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uegf7toz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344173529Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.344093218Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.344053358Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueg0mhyh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344044597Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueg0mhyh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.344016617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.343993157Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueeyakk3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343890856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueeyakk3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343829335Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueeyakk3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343790395Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.343568989Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ueb5nwsi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343689604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueb5nwsi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343616013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueatwg4k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343516242Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueatwg4k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343487961Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueatwg4k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343449321Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ueatwg4k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34336107Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ue4xtmk8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.343002637Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.342795466Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ue4kqdg4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.342730734Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ue3blk5g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.342434951Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ue2ta95g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34234409Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ue2ta95g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.342282879Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.342131942Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.342090913Z caller=remote_instance_store.go:51 user=925529 slug=diggerhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.342066797Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:14.341964048Z caller=grafana.go:247 user=557231 slug=lnrsusinsuranceprod msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=60 alerts=0 + level=debug ts=2024-05-29T13:44:14.341941638Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udxb2msd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341743974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-udp8k6jp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341578402Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udns04dh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34136432Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udns04dh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341324989Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udns04dh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341252099Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udji4r4m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341151148Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udji4r4m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341084367Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udji4r4m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341028426Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-udji4r4m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.341013776Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-udix5fya-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340952995Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.340798655Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.340818118Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.97277ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ud82zd4o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340780354Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ud82zd4o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340764944Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ud82zd4o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340612162Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucxzwuys-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340485071Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucx6h2by-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34039264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucx6h2by-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.34037263Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucx6h2by-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340303429Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucx6h2by-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340230688Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucr386bx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340190738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucr386bx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.340122087Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucihz9l8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339991026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucihz9l8-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339917025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucemkx6v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339746053Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucemkx6v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339595312Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.339606894Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.339496999Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.339523159Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.339486709Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.339481469Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucehl5nr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33942205Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.339385687Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ucehl5nr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339355159Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.339348564Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.352631ms + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc904zrr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339326239Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc904zrr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339315989Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.339304663Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.339196875Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc7j76p2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339152107Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc7j76p2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.339076796Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc5mnaul-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338870984Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc5mnaul-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338839334Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.339092427Z caller=remote_instance_store.go:51 user=127717 slug=engenoil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc5f15ah-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338732723Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc5f15ah-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338668592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc5f15ah-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338656872Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc5f15ah-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338611811Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=127717 slug=engenoil instance="datasource_uid=-c-NDQGGk, ref_id=A" t=2024-05-29T13:44:14.338973135Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=127717 slug=engenoil version=1 fingerprint=47c8a8e335c243f5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.338850306Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=-c-NDQGGk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.338493236s EvaluationString:}]" duration=839.804508ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uc41v8ep-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33848091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubxjlbe1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33844362Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.338906497Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.338907613Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.338848128Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubxjlbe1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338373789Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.338811001Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.338736234Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.338692598Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubrvtlic-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338336229Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubrvtlic-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338279728Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=35223 slug=silkroad t=2024-05-29T13:44:14.338664413Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=35223 slug=silkroad instance= t=2024-05-29T13:44:14.338650748Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.338625984Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=326992 slug=dropcontact instance= t=2024-05-29T13:44:14.338563105Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.338235718Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubpd786e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338179437Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.33854666Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=326992 slug=dropcontact instance= t=2024-05-29T13:44:14.338555309Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubpd786e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338136807Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:14.338481854Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubpd786e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338108536Z level=debug msg="Setting next state" handler=resultNormal + level=error ts=2024-05-29T13:44:14.338450861Z caller=remote_rule_evaluator.go:110 user=326992 slug=dropcontact msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.scheduler user=326992 slug=dropcontact version=2 fingerprint=40153c23f1f7d97e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.33848344Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=2.63866ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.33850033Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.360726ms + level=debug ts=2024-05-29T13:44:14.338388499Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.338410291Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.338322909Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubpd786e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.338067916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ublj62yx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.337880464Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubg17cm8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.337848954Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.337849141Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.337814377Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.337801092Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=191103 slug=amazonadmin version=147 fingerprint=ec7c0eca3658da97 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.337708339Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.337506592s EvaluationString:}]" duration=74.22265ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubfab0mr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.337586881Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubejrf1n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33745727Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubejrf1n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.337327048Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ubejrf1n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.337317048Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.337251288Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.337083731Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uaxw8csk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.337224697Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.337171828Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uaxw8csk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.337194347Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.337123627Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uaw0hgab-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.337099776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uaw0hgab-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336995035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uaw0hgab-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336938724Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uat3gbaq-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336852323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uat3gbaq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336827963Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uaskfe5j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336685092Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.336596296Z caller=remote_instance_store.go:51 user=354676 slug=gridmarketenergy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uasepwm5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33649898Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance="arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1a, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=9b599b5080b142c5adb23bee86d5c70f, environment=PROD" t=2024-05-29T13:44:14.336448194Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uasepwm5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336412539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.336330795Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:45:40Z next_ends_at=2024-05-29T13:46:10Z + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.336305248Z level=debug msg="Execution keep last state is Alerting" handler=resultAlerting + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.336284962Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=7391e61447beb843 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.336224029Z level=debug msg="Alert rule evaluated" results="[{Instance:arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1a, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=9b599b5080b142c5adb23bee86d5c70f, environment=PROD State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1a, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=9b599b5080b142c5adb23bee86d5c70f, environment=PROD Value:0xc02b0d1bd0} C:{Var:C Labels:arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1a, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=9b599b5080b142c5adb23bee86d5c70f, environment=PROD Value:0xc02b0d1d88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.335784652s EvaluationString:[ var='B' labels={arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1a, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=9b599b5080b142c5adb23bee86d5c70f, environment=PROD} value=6.989761092150171 ], [ var='C' labels={arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1a, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=9b599b5080b142c5adb23bee86d5c70f, environment=PROD} value=0 ]} {Instance:arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1b, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=ac90c0488c1b4914a003cf996122f3f8, environment=PROD State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1b, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=ac90c0488c1b4914a003cf996122f3f8, environment=PROD Value:0xc02d134060} C:{Var:C Labels:arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1b, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=ac90c0488c1b4914a003cf996122f3f8, environment=PROD Value:0xc02d134120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.335807923s EvaluationString:[ var='B' labels={arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1b, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=ac90c0488c1b4914a003cf996122f3f8, environment=PROD} value=6.781456953642384 ], [ var='C' labels={arena=UNKNOWN, aws_ecs_launchtype=fargate, aws_ecs_task_known_status=RUNNING, aws_ecs_task_revision=90, cloud_availability_zone=eu-central-1b, ecs_cluster_name=PROD-IAM, ecs_task_definition=PROD-IAM-STS, ecs_task_id=ac90c0488c1b4914a003cf996122f3f8, environment=PROD} value=0 ]}]" duration=245.560478ms + level=debug ts=2024-05-29T13:44:14.336326483Z 
caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.336277559Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.336335822Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" t=2024-05-29T13:44:14.336266864Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uasepwm5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336281128Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uak0867j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336215607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uak0867j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.336033635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uair8mip-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.335913824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=481955 slug=letsswap24 t=2024-05-29T13:44:14.335742801Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=50.413346ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uair8mip-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.335731052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.335600084Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=14.253636ms + level=debug ts=2024-05-29T13:44:14.335513301Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uaikt14o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33556762Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uai0spqg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.335321488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uai0spqg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.335295477Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=webhook" t=2024-05-29T13:44:14.335331251Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-uai0spqg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.335259817Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=vpa" t=2024-05-29T13:44:14.335202785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=vpa" t=2024-05-29T13:44:14.335177043Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ua7cj9nv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.335108746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ua7cj9nv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.335049695Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ua2bw3v4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334991704Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ua2bw3v4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334925794Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=server" t=2024-05-29T13:44:14.334921909Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ua1g4jp5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334775442Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=secret-store-automation" t=2024-05-29T13:44:14.334772902Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.334762465Z caller=remote_alert_sender.go:94 user=389502 slug=ciscoiot host=ciscoiot-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.235.17:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdlb3fdblq2gwc alerts=3 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ua1g4jp5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334735862Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9s1c1vr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334639391Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.334580478Z caller=remote_instance_store.go:51 
user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.334519296Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.334474959Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=895137 slug=uid2 version=42 fingerprint=0c30829655431151 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.334320073Z level=debug msg="Alert rule evaluated" results="[{Instance:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-0, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-0, test_override=uid_pods Value:0xc02ab49350} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-0, test_override=uid_pods Value:0xc02ab48e68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.333692789s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-0, test_override=uid_pods} value=12.05020920502092 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-0, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-1, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-1, test_override=uid_pods Value:0xc02ab49548} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-1, test_override=uid_pods Value:0xc02ab49760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.333715199s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-1, test_override=uid_pods} value=12.050262985832015 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-1, test_override=uid_pods} value=0 ]} {Instance:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-2, test_override=uid_pods State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-2, test_override=uid_pods Value:0xc02ab49b40} THRESHOLD:{Var:THRESHOLD Labels:application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-2, test_override=uid_pods Value:0xc02ab49c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.333727299s EvaluationString:[ var='QUERY' labels={application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-2, 
test_override=uid_pods} value=12.05020920502092 ], [ var='THRESHOLD' labels={application=uid2-optout, cluster=uid2-us-east-2, container=uid2-optout, env=prod, job=uid_pods, ns=uid, pod=uid2-optout-v2-2, test_override=uid_pods} value=0 ]}]" duration=12.541145ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9q2cs91-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334457219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9q2cs91-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334426299Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID523dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=692010 slug=mercariusprod t=2024-05-29T13:44:14.334341356Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.762332ms + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=rotation-salts-prod" t=2024-05-29T13:44:14.334384787Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.334244942Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.334267558Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9o2d8p0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334214146Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.334104047Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=rotation-master-key-prod" t=2024-05-29T13:44:14.334229145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A" t=2024-05-29T13:44:14.334184151Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9o2d8p0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.334160826Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A" t=2024-05-29T13:44:14.334177464Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A" t=2024-05-29T13:44:14.334167487Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.33414408Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=389502 slug=ciscoiot t=2024-05-29T13:44:14.334142253Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=72.991271ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A" t=2024-05-29T13:44:14.334130021Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=repo-server" t=2024-05-29T13:44:14.334100805Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=repo-server" t=2024-05-29T13:44:14.334088904Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9jmoxe0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.334003674Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9jmoxe0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.333899193Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.333904209Z caller=remote_instance_store.go:51 user=767797 slug=mgmresorts msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9fup5rm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.333851563Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9fup5rm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.333821812Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=pushgateway-server" t=2024-05-29T13:44:14.333812988Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=767797 slug=mgmresorts t=2024-05-29T13:44:14.333750598Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9fum9fc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.333683171Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9fum9fc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33362234Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9fum9fc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33361263Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9bsjhl8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33358009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9bsjhl8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.333546759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9bsjhl8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.333500949Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.333490581Z caller=remote_instance_store.go:51 user=349229 
slug=kropyva msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.333493997Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.333454988Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u9ace74d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.333198956Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.333207312Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u98u8wwg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.333126785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=node-driver-registrar" t=2024-05-29T13:44:14.333090456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u98u8wwg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332934173Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332845425Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332826885Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332816894Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u97pzfx8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332898883Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332803964Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + 
logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332797262Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332767722Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332758641Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=m3db-prod-rep2" t=2024-05-29T13:44:14.332792179Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332594252Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332581483Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332560312Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u97o7pet-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33264917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=m3db-prod-rep1" t=2024-05-29T13:44:14.332666044Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=m3db-prod-rep1" t=2024-05-29T13:44:14.332649886Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u97o7pet-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33261575Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.332404054Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332518699Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8zot615-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332553569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.332478078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8zot615-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332511539Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.332446522Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8vq79vv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332432518Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8vq79vv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332411478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=m3db-operator" t=2024-05-29T13:44:14.332381492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=m3db-operator" t=2024-05-29T13:44:14.332368897Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID229dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.332323692Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=960.265462ms + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=d0409396f887ab11 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.332257018Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.33190992s EvaluationString:}]" duration=15.728087ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8vj8qaz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332251446Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8vj8qaz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.332218986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=loki-server" t=2024-05-29T13:44:14.332230631Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.332214636Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:14.332202695Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=471861 slug=planetstaging version=2 fingerprint=d3b7c46e8d98d18e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.332099565Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.331834759s EvaluationString:}]" duration=19.555836ms + level=debug ts=2024-05-29T13:44:14.332023274Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=liveness-probe" t=2024-05-29T13:44:14.332102603Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.331981531Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8isu52a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.331847472Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.331745541Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u8e8z03u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.331597229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=grafana-server" t=2024-05-29T13:44:14.331520896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-u8e0c60s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.331465978Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.331320193Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=external-secrets" t=2024-05-29T13:44:14.3312887Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.331284576Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.331239493Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.331207234Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=d57fd9f9f74e3f29 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.331163785Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.33091014s EvaluationString:}]" duration=22.970838ms + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=external-dns" t=2024-05-29T13:44:14.33114665Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u81ai0gq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.331060304Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.330879223Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u81ai0gq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.330987673Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.330920468Z caller=remote_instance_store.go:51 user=452115 slug=ybmetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=exporter" t=2024-05-29T13:44:14.330999234Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7ogkd21-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.330946432Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7ogkd21-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.33069325Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=euid-validator-v2-prod-candidate-operator" t=2024-05-29T13:44:14.330763283Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7niy05g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.330635209Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:14.330631533Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7niy05g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.330523528Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7niy05g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.330452837Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=euid-tcportal" t=2024-05-29T13:44:14.330398605Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=euid-tcportal" t=2024-05-29T13:44:14.330388116Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.330350859Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7mt2u41-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.330256755Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=873368 slug=euid instance="cluster=euid-prod, container=euid-optout-cronjob" t=2024-05-29T13:44:14.330266347Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.330267808Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=A,B" t=2024-05-29T13:44:14.33027819Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.330189707Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7gvz9ui-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.330121174Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.330064295Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u7gvz9ui-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.330052483Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=euid-operator" t=2024-05-29T13:44:14.329981164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.32981538Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.329788544Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=euid-admin" t=2024-05-29T13:44:14.329736863Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u77ria4p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.32969801Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6zx1e3w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.329537188Z level=debug 
msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.329509093Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.329405243Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6zx1e3w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.329278295Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6wtm1uq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.329229275Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6wtm1uq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.329145544Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.329138674Z caller=remote_instance_store.go:51 user=874970 slug=nvidia msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.329260759Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.048099ms + level=debug ts=2024-05-29T13:44:14.329100764Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.329118726Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6vmleoz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.32876176Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=633335 slug=promqlworkshop t=2024-05-29T13:44:14.328696712Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.328671107Z caller=remote_instance_store.go:51 user=477402 slug=infleqtion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-u6td21fi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.328653819Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=config-reloader" t=2024-05-29T13:44:14.328671637Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.328654276Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.328634232Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.328489895Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6td21fi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.328540238Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.328562514Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.328560483Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.328556515Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6td21fi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.328430707Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.328502015Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.328481401Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.328481409Z caller=remote_alert_sender.go:94 user=251760 slug=forgerock host=forgerock-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.50.150:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edj2pnw9nnzswb alerts=4 + logger=ngalert.scheduler user=369319 slug=undagrid version=21 fingerprint=f023e325e21e42a1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.328403249Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.328060909s EvaluationString:}]" duration=25.689044ms + level=debug ts=2024-05-29T13:44:14.328392075Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.328445211Z 
caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.328457449Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.32833129Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.328361709Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=251760 slug=forgerock t=2024-05-29T13:44:14.328372702Z level=debug msg="Saving alert states done" count=9 max_state_save_concurrency=1 duration=1.606057878s + logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.328317996Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6sxcfo7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.328257765Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.328178001Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=cert-manager-controller" t=2024-05-29T13:44:14.328275274Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.32810996Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.328060558Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.328078974Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.327972894Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.327961798Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod" t=2024-05-29T13:44:14.328025912Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:46:00Z next_ends_at=2024-05-29T13:46:10Z + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.327951062Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6driovh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327919121Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6driovh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327870701Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.327937867Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=aws-node" t=2024-05-29T13:44:14.32790302Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=aws-node" t=2024-05-29T13:44:14.327890717Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=aws-load-balancer-controller" t=2024-05-29T13:44:14.327747548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy t=2024-05-29T13:44:14.327701906Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.scheduler user=412779 slug=microstrategy version=113 fingerprint=864ae7f57fe53ef5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.327465341Z level=debug msg="Alert rule evaluated" results="[{Instance:agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod Value:0xc011c2b220} B:{Var:B Labels:agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod Value:0xc011c2b300} C:{Var:C Labels:agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod Value:0xc011c2b3f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.326782974s EvaluationString:[ var='A' labels={agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, 
env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod} value=99.99952199583946 ], [ var='B' labels={agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod} value=99.99952199583946 ], [ var='C' labels={agent_hostname=env-326451laio1eastus2, cloud_platform=Azure, customer_id=A224, device=/dev/mapper/rootvg-homelv, env_id=326451, env_name=A224 Techint Prod, env_type=prod, fstype=xfs, instance=env-326451laio1eastus2, job=integrations/node_exporter, mountpoint=/home, region=eastus2, stage=preprod} value=1 ]} {Instance:agent_hostname=env-329278laio1eastus, cloud_platform=AZURE, customer_id=A005, device=/dev/mapper/rootvg-homelv, env_id=env-329278, env_name=SEC_U12_PenTest, env_type=prod, fstype=xfs, instance=env-329278laio1eastus, job=integrations/node_exporter, mountpoint=/home, region=eastus, stage=preprod State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:agent_hostname=env-329278laio1eastus, cloud_platform=AZURE, customer_id=A005, device=/dev/mapper/rootvg-homelv, env_id=env-329278, env_name=SEC_U12_PenTest, env_type=prod, fstype=xfs, instance=env-329278laio1eastus, job=integrations/node_exporter, mountpoint=/home, region=eastus, stage=preprod Value:0xc011c2b7c8} B:{Var:B Labels:agent_hostname=env-329278laio1eastus, cloud_platform=AZURE, customer_id=A005, device=/dev/mapper/rootvg-homelv, env_id=env-329278, env_name=SEC_U12_PenTest, env_type=prod, fstype=xfs, instance=env-329278laio1eastus, job=integrations/node_exporter, mountpoint=/home, region=eastus, stage=preprod Value:0xc011c2b8d8} C:{Var:C Labels:agent_hostname=env-329278laio1eastus, cloud_platform=AZURE, customer_id=A005, device=/dev/mapper/rootvg-homelv, env_id=env-329278, env_name=SEC_U12_PenTest, env_type=prod, fstype=xfs, instance=env-329278laio1eastus, job=integrations/node_exporter, mountpoint=/home, region=eastus, stage=preprod Value:0xc011c2b650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.32681908s EvaluationString:[ var='A' labels={agent_hostname=env-329278laio1eastus, cloud_platform=AZURE, customer_id=A005, device=/dev/mapper/rootvg-homelv, env_id=env-329278, env_name=SEC_U12_PenTest, env_type=prod, fstype=xfs, instance=env-329278laio1eastus, job=integrations/node_exporter, mountpoint=/home, region=eastus, stage=preprod} value=99.99952199583946 ], [ var='B' labels={agent_hostname=env-329278laio1eastus, cloud_platform=AZURE, customer_id=A005, device=/dev/mapper/rootvg-homelv, env_id=env-329278, env_name=SEC_U12_PenTest, env_type=prod, fstype=xfs, instance=env-329278laio1eastus, job=integrations/node_exporter, mountpoint=/home, region=eastus, stage=preprod} value=99.99952199583946 ], [ var='C' labels={agent_hostname=env-329278laio1eastus, cloud_platform=AZURE, customer_id=A005, device=/dev/mapper/rootvg-homelv, env_id=env-329278, env_name=SEC_U12_PenTest, env_type=prod, fstype=xfs, instance=env-329278laio1eastus, job=integrations/node_exporter, mountpoint=/home, region=eastus, stage=preprod} value=1 ]}]" duration=235.08019ms + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.327616089Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager 
user=873368 slug=euid instance="cluster=euid-prod, container=applicationset-controller" t=2024-05-29T13:44:14.327595851Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:14.327584791Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6df9z6y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327628238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u6df9z6y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327601958Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=applicationset-controller" t=2024-05-29T13:44:14.327583653Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5z5dht0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327534857Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=cbde50723543a69a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.327489155Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000002, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.327227508s EvaluationString:}]" duration=41.745303ms + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:14.327485761Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.422803ms + level=debug ts=2024-05-29T13:44:14.327493649Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.327446451Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=application-controller" t=2024-05-29T13:44:14.327464129Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5z5dht0-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327410726Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5xg148i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327376996Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5xg148i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327312395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=anubis-kpop" t=2024-05-29T13:44:14.327335997Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.327240917Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5xg148i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.327204084Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=alloy" t=2024-05-29T13:44:14.327207996Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=alloy" t=2024-05-29T13:44:14.327196785Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=6127473736d1dce6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.327006571Z level=debug msg="Alert rule evaluated" results="[{Instance:DatabaseClass=db.r5.xlarge State:NoData Error: Results:map[] Values:map[D:{Var:D Labels:DatabaseClass=db.r5.xlarge Value:} E:{Var:E Labels:DatabaseClass=db.r5.xlarge Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.326655822s EvaluationString:[ var='D' labels={DatabaseClass=db.r5.xlarge} value=null ], [ var='E' labels={DatabaseClass=db.r5.xlarge} value=null ]}]" duration=115.435231ms + level=debug ts=2024-05-29T13:44:14.327071466Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-prod, container=alertmanager-server" t=2024-05-29T13:44:14.327076628Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.326942319Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5rv4kcs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.326932831Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5rv4kcs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.326863691Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.32681508Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.326737539Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=vpa" t=2024-05-29T13:44:14.326587017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5icbkbi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.326404416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5icbkbi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.326299955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u5gxp16g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.326107603Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.326066422Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=secret-init" t=2024-05-29T13:44:14.326104679Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.326007883Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.3259653Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=320778 slug=omegaai instance="datasource_uid=k3-C8xH4z, ref_id=A" t=2024-05-29T13:44:14.326002796Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=rotation-site-keys-integ" t=2024-05-29T13:44:14.326002923Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=320778 slug=omegaai t=2024-05-29T13:44:14.325960116Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4yfk4u2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325711099Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=548157 slug=kushkiprod instance="datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A" t=2024-05-29T13:44:14.325627747Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=548157 slug=kushkiprod version=240 fingerprint=c8fda1a39e3d95f2 attempt=1 now=2024-05-29T13:44:00Z t=2024-05-29T13:44:14.325452604Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:00 +0000 UTC EvaluationDuration:14.324934656s EvaluationString:}]" duration=4.189054817s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4y8kuet-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325495677Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4y8kuet-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325469126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4ll8bq2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325400856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=pushgateway-server" t=2024-05-29T13:44:14.325407168Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.325337537Z caller=remote_instance_store.go:51 user=328755 
slug=infogrideu msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=B" t=2024-05-29T13:44:14.325243896Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=B" t=2024-05-29T13:44:14.325236384Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=328755 slug=infogrideu t=2024-05-29T13:44:14.325272289Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=grafanacloud-logs, ref_id=B" t=2024-05-29T13:44:14.325187587Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=228733 slug=csmoney t=2024-05-29T13:44:14.32514999Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4kvy98h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325211524Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=328755 slug=infogrideu version=1 fingerprint=60f944a77aeebd4a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.325079219Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Expression:{Var:Expression Labels: Value:0xc0022ea830} Query:{Var:Query Labels: Value:0xc0022ea7f0} Reducer:{Var:Reducer Labels: Value:0xc0022ea808}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.324738189s EvaluationString:[ var='Expression' labels={} value=0 ], [ var='Query' labels={} value=0 ], [ var='Reducer' labels={} value=0 ]}]" duration=22.306808ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4kvy98h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325160873Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=438855 slug=teckresources t=2024-05-29T13:44:14.325143307Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=438855 slug=teckresources instance= t=2024-05-29T13:44:14.325125786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4kvy98h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325087312Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4kvy98h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325071302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4hgkk4v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.325011892Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=private-site-data-refresh-integ" t=2024-05-29T13:44:14.325010438Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=private-site-data-refresh-integ" t=2024-05-29T13:44:14.324998098Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4gyzn91-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324790649Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4gyzn91-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324764379Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.324740533Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=node-driver-registrar" t=2024-05-29T13:44:14.324768737Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.324692126Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4gyzn91-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324622008Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.324625721Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u4d9t89a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324466586Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=metrics-server" t=2024-05-29T13:44:14.32464921Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=m3db-operator" t=2024-05-29T13:44:14.324517609Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.324382845Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u48ifm0g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324304514Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u48ifm0g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324263294Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.321326046Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.321209054Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=m3db-integ-rep1" t=2024-05-29T13:44:14.324283877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u48ifm0g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324186523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=m3db-integ-rep0" t=2024-05-29T13:44:14.324175004Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=m3db-integ-rep0" t=2024-05-29T13:44:14.324161326Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u487oo0o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.324086062Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:14.324064733Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:14.324035564Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u487oo0o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.323935951Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=kubeinfo-agent" t=2024-05-29T13:44:14.323818625Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.323779623Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.323614179Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=kube-state-metrics" t=2024-05-29T13:44:14.323695962Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u3p950l6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.323649948Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=307381 slug=kambitaskforce version=16 fingerprint=eeb4bb608ab0fe5e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.323509236Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.323111654s EvaluationString:}]" duration=20.464864ms + level=debug ts=2024-05-29T13:44:14.323594291Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.323476372Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u3p950l6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.323512136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=kube-proxy" t=2024-05-29T13:44:14.323541287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.323447428Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.323423569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u3kr3ipd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.323392715Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u3kr3ipd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.323339724Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u3ezdanp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.323169153Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u3ezdanp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.323131312Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u3ezdanp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.323021671Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.32298471Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.322909962Z caller=ruler.go:522 msg="tenant is owned by this instance" user=708906 slug=wbw groups=0 + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=exporter" t=2024-05-29T13:44:14.322920838Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.32275238Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.322782508Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.322720288Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.32269176Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=675749 slug=whalfman + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=euid-optout-cronjob" t=2024-05-29T13:44:14.322714946Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u36lkqff-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.322601567Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.322459977Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.658633ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u36lkqff-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.322452215Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.322476588Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u36lkqff-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.322360694Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.322423204Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=37.938046ms + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=euid-admin" t=2024-05-29T13:44:14.32227141Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=ebs-plugin" t=2024-05-29T13:44:14.322043492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-u2ys4zxr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.32189982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=csi-provisioner" t=2024-05-29T13:44:14.321811598Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2ys4zxr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.321727898Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2ys4zxr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.321701448Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.321605178Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.321601483Z caller=remote_instance_store.go:51 user=481955 slug=letsswap24 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2y7pndw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.321661287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2y7pndw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.321632657Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=coredns" t=2024-05-29T13:44:14.321571618Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=coredns" t=2024-05-29T13:44:14.321558086Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.321363623Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.321433825Z caller=remote_alert_sender.go:94 user=269887 slug=blackrockdev host=blackrockdev-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.246.241:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdifrsfy7ebcwb 
alerts=1 + logger=ngalert.state.manager.persist user=269887 slug=blackrockdev t=2024-05-29T13:44:14.321347607Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=331.177525ms + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=config-reloader" t=2024-05-29T13:44:14.321320044Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.321268264Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=430961 slug=solifi version=7 fingerprint=432bf4144c7bd71a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.3212092Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.320887908s EvaluationString:}]" duration=88.498771ms + level=info component=discovery ts=2024-05-29T13:44:14.321141745Z caller=client.go:80 msg="creating client for grafana instance" user=515819 addr=dns:///wwwslaists-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2t9ziqb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.321149212Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.321073245Z caller=ruler.go:522 msg="tenant is owned by this instance" user=705918 slug=vilea groups=1 + level=debug ts=2024-05-29T13:44:14.321150233Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.321115526Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.321037425Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=cert-manager-webhook" t=2024-05-29T13:44:14.321113631Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=cert-manager-webhook" t=2024-05-29T13:44:14.321102731Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.320931804Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=cert-manager-controller" t=2024-05-29T13:44:14.320995622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.3209632Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2q2gj0j-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.3209209Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:14.320954381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2q2gj0j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.32090745Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:14.320944895Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=155740 slug=routific t=2024-05-29T13:44:14.320913395Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2q2gj0j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.320835829Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.320820284Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=155740 slug=routific version=2 fingerprint=2a1681caad3c974f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.320852957Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.320559965s EvaluationString:}]" duration=118.915635ms + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=cert-manager-cainjector" t=2024-05-29T13:44:14.320866968Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=cert-manager-cainjector" t=2024-05-29T13:44:14.320854375Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.320625353Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2o5jsli-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.320653137Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer instance="appId=CHSAINTLO01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHSAINTLO01" t=2024-05-29T13:44:14.320555797Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=112732 slug=gleamer instance="appId=CHSAINTLO01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHSAINTLO01" t=2024-05-29T13:44:14.320540357Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=5da913ecab0d2cec attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.320425788Z level=debug msg="Alert rule evaluated" results="[{Instance:appId=CHSAINTLO01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHSAINTLO01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:appId=CHSAINTLO01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHSAINTLO01 Value:0xc0332a6920} B:{Var:B Labels:appId=CHSAINTLO01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHSAINTLO01 Value:0xc0332a6990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.320130045s EvaluationString:[ var='A' labels={appId=CHSAINTLO01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHSAINTLO01} value=314.267899191 ], [ var='B' labels={appId=CHSAINTLO01, component=gmsk-proxy-api, id=full, instance=gmsk-proxy-api:8090, job=gmsk-proxy-api, origin_prometheus=CHSAINTLO01} value=0 ]}]" duration=11.623179ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2lrn062-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.320495915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2lrn062-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.320466675Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2lrn062-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.320412534Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=aws-load-balancer-controller" t=2024-05-29T13:44:14.320462916Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2jz787b-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.320277303Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.320375452Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:14.320318938Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=561759 slug=wastedboys + logger=ngalert.state.manager user=873368 slug=euid instance="cluster=euid-nonprod, container=applicationset-controller" t=2024-05-29T13:44:14.320356136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.320235749Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=698963 slug=lemonade version=3 fingerprint=a5f836e23a62a299 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.320162279Z level=debug msg="Alert rule evaluated" results="[{Instance:app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-66479d2qpv State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-66479d2qpv Value:0xc0258f1950} THRESHOLD:{Var:THRESHOLD Labels:app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-66479d2qpv Value:0xc0258f1980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.319819175s EvaluationString:[ var='QUERY' labels={app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-66479d2qpv} value=0 ], [ var='THRESHOLD' labels={app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-66479d2qpv} value=0 ]}]" duration=51.048383ms + level=debug ts=2024-05-29T13:44:14.320137012Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u2e8oxep-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.319892859Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.319995131Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.319828852Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.319752793Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.319642496Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=873368 slug=euid version=42 fingerprint=428692e75c49886b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.318169339Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=euid-nonprod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY 
Labels:cluster=euid-nonprod Value:0xc0051251a8} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod Value:0xc0051251f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.315994736s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod} value=9127 ], [ var='THRESHOLD' labels={cluster=euid-nonprod} value=0 ]} {Instance:cluster=euid-nonprod, container=admin-data-sourcing-integ State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=admin-data-sourcing-integ Value:0xc005125250} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=admin-data-sourcing-integ Value:0xc0051252a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316004476s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=admin-data-sourcing-integ} value=18 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=admin-data-sourcing-integ} value=0 ]} {Instance:cluster=euid-nonprod, container=alertmanager-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=alertmanager-server Value:0xc005125300} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=alertmanager-server Value:0xc005125350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316007156s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=alertmanager-server} value=23 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=alertmanager-server} value=0 ]} {Instance:cluster=euid-nonprod, container=alloy State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=alloy Value:0xc0051253b8} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=alloy Value:0xc005125400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316010926s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=alloy} value=173 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=alloy} value=0 ]} {Instance:cluster=euid-nonprod, container=anubis-kpop State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=anubis-kpop Value:0xc0051254e0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=anubis-kpop Value:0xc005125490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316013976s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=anubis-kpop} value=67 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=anubis-kpop} value=0 ]} {Instance:cluster=euid-nonprod, container=application-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=application-controller Value:0xc005125540} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=application-controller Value:0xc005125590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316016746s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=application-controller} value=25 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=application-controller} value=0 ]} {Instance:cluster=euid-nonprod, container=applicationset-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=applicationset-controller Value:0xc005125640} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=applicationset-controller Value:0xc005125600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316018816s 
EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=applicationset-controller} value=21 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=applicationset-controller} value=0 ]} {Instance:cluster=euid-nonprod, container=aws-load-balancer-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=aws-load-balancer-controller Value:0xc0051256a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=aws-load-balancer-controller Value:0xc0051256f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316020966s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=aws-load-balancer-controller} value=42 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=aws-load-balancer-controller} value=0 ]} {Instance:cluster=euid-nonprod, container=aws-node State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=aws-node Value:0xc005125768} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=aws-node Value:0xc0051257b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316024926s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=aws-node} value=146 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=aws-node} value=0 ]} {Instance:cluster=euid-nonprod, container=cert-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=cert-controller Value:0xc005125830} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=cert-controller Value:0xc005125890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316027606s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=cert-controller} value=15 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=cert-controller} value=0 ]} {Instance:cluster=euid-nonprod, container=cert-manager-cainjector State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=cert-manager-cainjector Value:0xc0051258f0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=cert-manager-cainjector Value:0xc005125940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316030586s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=cert-manager-cainjector} value=67 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=cert-manager-cainjector} value=0 ]} {Instance:cluster=euid-nonprod, container=cert-manager-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=cert-manager-controller Value:0xc0051259a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=cert-manager-controller Value:0xc0051259f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316033026s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=cert-manager-controller} value=67 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=cert-manager-controller} value=0 ]} {Instance:cluster=euid-nonprod, container=cert-manager-webhook State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=cert-manager-webhook Value:0xc005125a50} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=cert-manager-webhook Value:0xc005125aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316035206s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, 
container=cert-manager-webhook} value=67 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=cert-manager-webhook} value=0 ]} {Instance:cluster=euid-nonprod, container=cluster-proportional-autoscaler State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=cluster-proportional-autoscaler Value:0xc005125b00} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=cluster-proportional-autoscaler Value:0xc005125b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316037286s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=cluster-proportional-autoscaler} value=25 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=cluster-proportional-autoscaler} value=0 ]} {Instance:cluster=euid-nonprod, container=config-reloader State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=config-reloader Value:0xc005125bd0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=config-reloader Value:0xc005125c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316040346s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=config-reloader} value=152 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=config-reloader} value=0 ]} {Instance:cluster=euid-nonprod, container=controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=controller Value:0xc005125cb0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=controller Value:0xc005125d10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316042276s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=controller} value=125 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=controller} value=0 ]} {Instance:cluster=euid-nonprod, container=coredns State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=coredns Value:0xc005125d78} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=coredns Value:0xc005125dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316044596s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=coredns} value=109 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=coredns} value=0 ]} {Instance:cluster=euid-nonprod, container=csi-attacher State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=csi-attacher Value:0xc005125e40} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=csi-attacher Value:0xc005125ea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316046296s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=csi-attacher} value=63 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=csi-attacher} value=0 ]} {Instance:cluster=euid-nonprod, container=csi-provisioner State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=csi-provisioner Value:0xc005125f20} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=csi-provisioner Value:0xc005125f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316048926s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=csi-provisioner} value=63 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=csi-provisioner} value=0 ]} {Instance:cluster=euid-nonprod, container=csi-resizer State:Normal Error: 
Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=csi-resizer Value:0xc001fe0060} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=csi-resizer Value:0xc001fe0010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316053026s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=csi-resizer} value=63 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=csi-resizer} value=0 ]} {Instance:cluster=euid-nonprod, container=ebs-plugin State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=ebs-plugin Value:0xc001fe00e0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=ebs-plugin Value:0xc001fe0140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316054786s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=ebs-plugin} value=221 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=ebs-plugin} value=0 ]} {Instance:cluster=euid-nonprod, container=etcd State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=etcd Value:0xc001fe01a8} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=etcd Value:0xc001fe01f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316056696s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=etcd} value=61 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=etcd} value=0 ]} {Instance:cluster=euid-nonprod, container=euid-admin State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=euid-admin Value:0xc001fe02d0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=euid-admin Value:0xc001fe0280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316058626s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=euid-admin} value=358 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=euid-admin} value=0 ]} {Instance:cluster=euid-nonprod, container=euid-core State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=euid-core Value:0xc001fe0350} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=euid-core Value:0xc001fe03b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316060336s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=euid-core} value=46984 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=euid-core} value=0 ]} {Instance:cluster=euid-nonprod, container=euid-operator State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=euid-operator Value:0xc001fe04c0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=euid-operator Value:0xc001fe0440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316062236s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=euid-operator} value=22872 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=euid-operator} value=0 ]} {Instance:cluster=euid-nonprod, container=euid-optout State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=euid-optout Value:0xc001fe0550} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=euid-optout Value:0xc001fe05b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316064146s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=euid-optout} 
value=149023 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=euid-optout} value=0 ]} {Instance:cluster=euid-nonprod, container=euid-optout-cronjob State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=euid-optout-cronjob Value:0xc001fe0610} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=euid-optout-cronjob Value:0xc001fe0660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316065996s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=euid-optout-cronjob} value=36 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=euid-optout-cronjob} value=0 ]} {Instance:cluster=euid-nonprod, container=euid-tcportal State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=euid-tcportal Value:0xc001fe0740} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=euid-tcportal Value:0xc001fe06f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316068016s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=euid-tcportal} value=304 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=euid-tcportal} value=0 ]} {Instance:cluster=euid-nonprod, container=exporter State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=exporter Value:0xc001fe07b8} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=exporter Value:0xc001fe0808}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316070496s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=exporter} value=150 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=exporter} value=0 ]} {Instance:cluster=euid-nonprod, container=external-dns State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=external-dns Value:0xc001fe0880} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=external-dns Value:0xc001fe0910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316072436s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=external-dns} value=23 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=external-dns} value=0 ]} {Instance:cluster=euid-nonprod, container=external-secrets State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=external-secrets Value:0xc001fe09a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=external-secrets Value:0xc001fe0a00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316075356s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=external-secrets} value=21 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=external-secrets} value=0 ]} {Instance:cluster=euid-nonprod, container=grafana-agent-opencost State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=grafana-agent-opencost Value:0xc001fe0a60} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=grafana-agent-opencost Value:0xc001fe0ab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316077246s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=grafana-agent-opencost} value=29 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=grafana-agent-opencost} value=0 ]} {Instance:cluster=euid-nonprod, container=grafana-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY 
Labels:cluster=euid-nonprod, container=grafana-server Value:0xc001fe0b30} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=grafana-server Value:0xc001fe0b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316079256s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=grafana-server} value=1881 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=grafana-server} value=0 ]} {Instance:cluster=euid-nonprod, container=kube-proxy State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=kube-proxy Value:0xc001fe0c10} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=kube-proxy Value:0xc001fe0c70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316081916s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=kube-proxy} value=130 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=kube-proxy} value=0 ]} {Instance:cluster=euid-nonprod, container=kube-state-metrics State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=kube-state-metrics Value:0xc001fe0d08} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=kube-state-metrics Value:0xc001fe0ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316083966s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=kube-state-metrics} value=44 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=kube-state-metrics} value=0 ]} {Instance:cluster=euid-nonprod, container=kubeinfo-agent State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=kubeinfo-agent Value:0xc001fe0dc0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=kubeinfo-agent Value:0xc001fe0e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316086816s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=kubeinfo-agent} value=25 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=kubeinfo-agent} value=0 ]} {Instance:cluster=euid-nonprod, container=liveness-probe State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=liveness-probe Value:0xc001fe0ea0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=liveness-probe Value:0xc001fe0f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316089606s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=liveness-probe} value=193 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=liveness-probe} value=0 ]} {Instance:cluster=euid-nonprod, container=loki-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=loki-server Value:0xc001fe0f80} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=loki-server Value:0xc001fe1010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316091576s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=loki-server} value=57 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=loki-server} value=0 ]} {Instance:cluster=euid-nonprod, container=m3db-integ-rep0 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=m3db-integ-rep0 Value:0xc001fe1090} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=m3db-integ-rep0 Value:0xc001fe10f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316093326s 
EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=m3db-integ-rep0} value=1003 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=m3db-integ-rep0} value=0 ]} {Instance:cluster=euid-nonprod, container=m3db-integ-rep1 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=m3db-integ-rep1 Value:0xc001fe11b8} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=m3db-integ-rep1 Value:0xc001fe1180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316095066s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=m3db-integ-rep1} value=1004 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=m3db-integ-rep1} value=0 ]} {Instance:cluster=euid-nonprod, container=m3db-integ-rep2 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=m3db-integ-rep2 Value:0xc001fe12a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=m3db-integ-rep2 Value:0xc001fe1250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316096926s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=m3db-integ-rep2} value=1003 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=m3db-integ-rep2} value=0 ]} {Instance:cluster=euid-nonprod, container=m3db-operator State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=m3db-operator Value:0xc001fe1380} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=m3db-operator Value:0xc001fe1330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316098526s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=m3db-operator} value=15 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=m3db-operator} value=0 ]} {Instance:cluster=euid-nonprod, container=metrics-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=metrics-server Value:0xc001fe1400} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=metrics-server Value:0xc001fe1460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316100926s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=metrics-server} value=67 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=metrics-server} value=0 ]} {Instance:cluster=euid-nonprod, container=node-driver-registrar State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=node-driver-registrar Value:0xc001fe1510} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=node-driver-registrar Value:0xc001fe14d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316105386s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=node-driver-registrar} value=130 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=node-driver-registrar} value=0 ]} {Instance:cluster=euid-nonprod, container=node-exporter State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=node-exporter Value:0xc001fe1590} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=node-exporter Value:0xc001fe15f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316107396s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=node-exporter} value=154 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=node-exporter} value=0 ]} 
{Instance:cluster=euid-nonprod, container=private-site-data-refresh-integ State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=private-site-data-refresh-integ Value:0xc001fe1660} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=private-site-data-refresh-integ Value:0xc001fe16b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316109166s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=private-site-data-refresh-integ} value=18 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=private-site-data-refresh-integ} value=0 ]} {Instance:cluster=euid-nonprod, container=prometheus-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=prometheus-server Value:0xc001fe1710} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=prometheus-server Value:0xc001fe1790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316111226s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=prometheus-server} value=33 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=prometheus-server} value=0 ]} {Instance:cluster=euid-nonprod, container=pull-secrets-automation State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=pull-secrets-automation Value:0xc001fe1800} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=pull-secrets-automation Value:0xc001fe1850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316113256s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=pull-secrets-automation} value=15 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=pull-secrets-automation} value=0 ]} {Instance:cluster=euid-nonprod, container=pushgateway-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=pushgateway-server Value:0xc001fe18b0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=pushgateway-server Value:0xc001fe1900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316115086s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=pushgateway-server} value=19 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=pushgateway-server} value=0 ]} {Instance:cluster=euid-nonprod, container=redis State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=redis Value:0xc001fe1958} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=redis Value:0xc001fe19b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316116886s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=redis} value=21 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=redis} value=0 ]} {Instance:cluster=euid-nonprod, container=repo-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=repo-server Value:0xc001fe1a30} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=repo-server Value:0xc001fe1a90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316118976s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=repo-server} value=50 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=repo-server} value=0 ]} {Instance:cluster=euid-nonprod, container=rotation-master-key-integ State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY 
Labels:cluster=euid-nonprod, container=rotation-master-key-integ Value:0xc001fe1b40} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=rotation-master-key-integ Value:0xc001fe1b00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316120806s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=rotation-master-key-integ} value=18 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=rotation-master-key-integ} value=0 ]} {Instance:cluster=euid-nonprod, container=rotation-salts-integ State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=rotation-salts-integ Value:0xc001fe1ba0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=rotation-salts-integ Value:0xc001fe1bf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316123096s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=rotation-salts-integ} value=6 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=rotation-salts-integ} value=0 ]} {Instance:cluster=euid-nonprod, container=rotation-site-keys-integ State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=rotation-site-keys-integ Value:0xc001fe1c50} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=rotation-site-keys-integ Value:0xc001fe1ca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316125266s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=rotation-site-keys-integ} value=6 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=rotation-site-keys-integ} value=0 ]} {Instance:cluster=euid-nonprod, container=secret-init State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=secret-init Value:0xc001fe1d20} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=secret-init Value:0xc001fe1d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316128696s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=secret-init} value=2 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=secret-init} value=0 ]} {Instance:cluster=euid-nonprod, container=secret-store-automation State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=secret-store-automation Value:0xc001fe1e30} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=secret-store-automation Value:0xc001fe1df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316130446s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=secret-store-automation} value=20 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=secret-store-automation} value=0 ]} {Instance:cluster=euid-nonprod, container=server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=server Value:0xc001fe1e98} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=server Value:0xc001fe1ed8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316133166s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=server} value=42 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=server} value=0 ]} {Instance:cluster=euid-nonprod, container=volumemodifier State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=volumemodifier Value:0xc001fe1f60} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, 
container=volumemodifier Value:0xc001fe1fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316135246s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=volumemodifier} value=63 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=volumemodifier} value=0 ]} {Instance:cluster=euid-nonprod, container=vpa State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=vpa Value:0xc006d64080} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=vpa Value:0xc006d64040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316136876s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=vpa} value=137 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=vpa} value=0 ]} {Instance:cluster=euid-nonprod, container=webhook State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-nonprod, container=webhook Value:0xc006d640e8} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-nonprod, container=webhook Value:0xc006d64130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316138656s EvaluationString:[ var='QUERY' labels={cluster=euid-nonprod, container=webhook} value=15 ], [ var='THRESHOLD' labels={cluster=euid-nonprod, container=webhook} value=0 ]} {Instance:cluster=euid-prod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod Value:0xc006d64170} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod Value:0xc006d64230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316140406s EvaluationString:[ var='QUERY' labels={cluster=euid-prod} value=13105 ], [ var='THRESHOLD' labels={cluster=euid-prod} value=0 ]} {Instance:cluster=euid-prod, container=admin-data-sourcing-prod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=admin-data-sourcing-prod Value:0xc006d64290} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=admin-data-sourcing-prod Value:0xc006d642e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316142166s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=admin-data-sourcing-prod} value=18 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=admin-data-sourcing-prod} value=0 ]} {Instance:cluster=euid-prod, container=alertmanager-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=alertmanager-server Value:0xc006d64340} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=alertmanager-server Value:0xc006d64390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316143936s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=alertmanager-server} value=23 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=alertmanager-server} value=0 ]} {Instance:cluster=euid-prod, container=alloy State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=alloy Value:0xc006d643f0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=alloy Value:0xc006d64440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316146526s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=alloy} value=313 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=alloy} value=0 ]} {Instance:cluster=euid-prod, container=anubis-kpop State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=anubis-kpop Value:0xc006d644c0} 
THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=anubis-kpop Value:0xc006d64520}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316148446s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=anubis-kpop} value=63 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=anubis-kpop} value=0 ]} {Instance:cluster=euid-prod, container=application-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=application-controller Value:0xc006d64580} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=application-controller Value:0xc006d64950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316150016s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=application-controller} value=21 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=application-controller} value=0 ]} {Instance:cluster=euid-prod, container=applicationset-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=applicationset-controller Value:0xc006d649b0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=applicationset-controller Value:0xc006d64a00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316152096s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=applicationset-controller} value=21 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=applicationset-controller} value=0 ]} {Instance:cluster=euid-prod, container=aws-load-balancer-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=aws-load-balancer-controller Value:0xc006d64ab0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=aws-load-balancer-controller Value:0xc006d64a70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316154196s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=aws-load-balancer-controller} value=46 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=aws-load-balancer-controller} value=0 ]} {Instance:cluster=euid-prod, container=aws-node State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=aws-node Value:0xc006d64b30} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=aws-node Value:0xc006d64b80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316156266s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=aws-node} value=296 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=aws-node} value=0 ]} {Instance:cluster=euid-prod, container=cert-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=cert-controller Value:0xc006d64c00} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=cert-controller Value:0xc006d64c60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316157927s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=cert-controller} value=15 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=cert-controller} value=0 ]} {Instance:cluster=euid-prod, container=cert-manager-cainjector State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=cert-manager-cainjector Value:0xc006d64f70} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=cert-manager-cainjector Value:0xc006d64cd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316159777s 
EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=cert-manager-cainjector} value=71 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=cert-manager-cainjector} value=0 ]} {Instance:cluster=euid-prod, container=cert-manager-controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=cert-manager-controller Value:0xc006d64fd0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=cert-manager-controller Value:0xc006d65020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316162557s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=cert-manager-controller} value=67 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=cert-manager-controller} value=0 ]} {Instance:cluster=euid-prod, container=cert-manager-webhook State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=cert-manager-webhook Value:0xc006d650d0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=cert-manager-webhook Value:0xc006d65090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316164307s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=cert-manager-webhook} value=67 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=cert-manager-webhook} value=0 ]} {Instance:cluster=euid-prod, container=cluster-proportional-autoscaler State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=cluster-proportional-autoscaler Value:0xc006d65130} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=cluster-proportional-autoscaler Value:0xc006d65180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316165997s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=cluster-proportional-autoscaler} value=21 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=cluster-proportional-autoscaler} value=0 ]} {Instance:cluster=euid-prod, container=config-reloader State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=config-reloader Value:0xc006d65200} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=config-reloader Value:0xc006d65260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316167857s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=config-reloader} value=278 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=config-reloader} value=0 ]} {Instance:cluster=euid-prod, container=controller State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=controller Value:0xc006d652e0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=controller Value:0xc006d65340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316170037s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=controller} value=125 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=controller} value=0 ]} {Instance:cluster=euid-prod, container=coredns State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=coredns Value:0xc006d653a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=coredns Value:0xc006d653f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316171717s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=coredns} value=113 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=coredns} value=0 ]} {Instance:cluster=euid-prod, 
container=csi-attacher State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=csi-attacher Value:0xc006d654d0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=csi-attacher Value:0xc006d65480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316175117s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=csi-attacher} value=72 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=csi-attacher} value=0 ]} {Instance:cluster=euid-prod, container=csi-provisioner State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=csi-provisioner Value:0xc006d65550} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=csi-provisioner Value:0xc006d655b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316177587s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=csi-provisioner} value=67 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=csi-provisioner} value=0 ]} {Instance:cluster=euid-prod, container=csi-resizer State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=csi-resizer Value:0xc006d65630} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=csi-resizer Value:0xc006d65690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316179287s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=csi-resizer} value=63 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=csi-resizer} value=0 ]} {Instance:cluster=euid-prod, container=ebs-plugin State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=ebs-plugin Value:0xc006d65710} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=ebs-plugin Value:0xc006d65770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316181047s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=ebs-plugin} value=343 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=ebs-plugin} value=0 ]} {Instance:cluster=euid-prod, container=etcd State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=etcd Value:0xc006d657d0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=etcd Value:0xc006d65820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316182827s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=etcd} value=65 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=etcd} value=0 ]} {Instance:cluster=euid-prod, container=euid-admin State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-admin Value:0xc006d658a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-admin Value:0xc006d65900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316186597s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-admin} value=335 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-admin} value=0 ]} {Instance:cluster=euid-prod, container=euid-core State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-core Value:0xc006d65980} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-core Value:0xc006d659e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316188717s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-core} value=94846 ], [ var='THRESHOLD' 
labels={cluster=euid-prod, container=euid-core} value=0 ]} {Instance:cluster=euid-prod, container=euid-operator State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-operator Value:0xc006d65a70} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-operator Value:0xc006d65af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316190587s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-operator} value=52669 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-operator} value=0 ]} {Instance:cluster=euid-prod, container=euid-optout State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-optout Value:0xc006d65b70} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-optout Value:0xc006d65bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316192987s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-optout} value=156713 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-optout} value=0 ]} {Instance:cluster=euid-prod, container=euid-optout-cronjob State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-optout-cronjob Value:0xc006d65c80} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-optout-cronjob Value:0xc006d65c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316197417s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-optout-cronjob} value=36 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-optout-cronjob} value=0 ]} {Instance:cluster=euid-prod, container=euid-tcportal State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-tcportal Value:0xc006d65d30} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-tcportal Value:0xc006d65d90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316199557s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-tcportal} value=26647 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-tcportal} value=0 ]} {Instance:cluster=euid-prod, container=euid-validator-prod-nginx-mirror State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-validator-prod-nginx-mirror Value:0xc006d65df0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-validator-prod-nginx-mirror Value:0xc006d65e40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316201727s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-validator-prod-nginx-mirror} value=19 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-validator-prod-nginx-mirror} value=0 ]} {Instance:cluster=euid-prod, container=euid-validator-v2-prod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-validator-v2-prod Value:0xc006d65ef0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-validator-v2-prod Value:0xc006d65eb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316203847s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-validator-v2-prod} value=586 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-validator-v2-prod} value=0 ]} {Instance:cluster=euid-prod, container=euid-validator-v2-prod-candidate-operator State:Normal Error: Results:map[] 
Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-validator-v2-prod-candidate-operator Value:0xc006d65fa0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-validator-v2-prod-candidate-operator Value:0xc006d65f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316206297s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-validator-v2-prod-candidate-operator} value=2095 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-validator-v2-prod-candidate-operator} value=0 ]} {Instance:cluster=euid-prod, container=euid-validator-v2-prod-reference-operator State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=euid-validator-v2-prod-reference-operator Value:0xc008f48000} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=euid-validator-v2-prod-reference-operator Value:0xc008f48050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316209147s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=euid-validator-v2-prod-reference-operator} value=2092 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=euid-validator-v2-prod-reference-operator} value=0 ]} {Instance:cluster=euid-prod, container=exporter State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=exporter Value:0xc008f480c8} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=exporter Value:0xc008f48120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316211047s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=exporter} value=300 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=exporter} value=0 ]} {Instance:cluster=euid-prod, container=external-dns State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=external-dns Value:0xc008f481a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=external-dns Value:0xc008f48200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316213047s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=external-dns} value=19 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=external-dns} value=0 ]} {Instance:cluster=euid-prod, container=external-secrets State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=external-secrets Value:0xc008f48280} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=external-secrets Value:0xc008f482e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316214657s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=external-secrets} value=21 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=external-secrets} value=0 ]} {Instance:cluster=euid-prod, container=grafana-agent-opencost State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=grafana-agent-opencost Value:0xc008f48340} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=grafana-agent-opencost Value:0xc008f48390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316216427s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=grafana-agent-opencost} value=25 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=grafana-agent-opencost} value=0 ]} {Instance:cluster=euid-prod, container=grafana-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=grafana-server 
Value:0xc008f48410} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=grafana-server Value:0xc008f48470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316219417s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=grafana-server} value=1628 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=grafana-server} value=0 ]} {Instance:cluster=euid-prod, container=kube-proxy State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=kube-proxy Value:0xc008f484f0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=kube-proxy Value:0xc008f48550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316221047s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=kube-proxy} value=268 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=kube-proxy} value=0 ]} {Instance:cluster=euid-prod, container=kube-state-metrics State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=kube-state-metrics Value:0xc008f485b0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=kube-state-metrics Value:0xc008f48600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316223117s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=kube-state-metrics} value=40 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=kube-state-metrics} value=0 ]} {Instance:cluster=euid-prod, container=kubeinfo-agent State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=kubeinfo-agent Value:0xc008f48680} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=kubeinfo-agent Value:0xc008f486e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316226317s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=kubeinfo-agent} value=25 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=kubeinfo-agent} value=0 ]} {Instance:cluster=euid-prod, container=liveness-probe State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=liveness-probe Value:0xc008f48760} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=liveness-probe Value:0xc008f487c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316228207s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=liveness-probe} value=327 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=liveness-probe} value=0 ]} {Instance:cluster=euid-prod, container=loki-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=loki-server Value:0xc008f48870} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=loki-server Value:0xc008f488f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316229857s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=loki-server} value=61 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=loki-server} value=0 ]} {Instance:cluster=euid-prod, container=m3db-operator State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=m3db-operator Value:0xc008f48970} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=m3db-operator Value:0xc008f489d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316231737s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=m3db-operator} value=15 ], [ var='THRESHOLD' labels={cluster=euid-prod, 
container=m3db-operator} value=0 ]} {Instance:cluster=euid-prod, container=m3db-prod-rep0 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=m3db-prod-rep0 Value:0xc008f48a50} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=m3db-prod-rep0 Value:0xc008f48ab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316233477s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=m3db-prod-rep0} value=955 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=m3db-prod-rep0} value=0 ]} {Instance:cluster=euid-prod, container=m3db-prod-rep1 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=m3db-prod-rep1 Value:0xc008f48b30} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=m3db-prod-rep1 Value:0xc008f48b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316235157s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=m3db-prod-rep1} value=979 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=m3db-prod-rep1} value=0 ]} {Instance:cluster=euid-prod, container=m3db-prod-rep2 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=m3db-prod-rep2 Value:0xc008f48c10} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=m3db-prod-rep2 Value:0xc008f48c80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316236737s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=m3db-prod-rep2} value=978 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=m3db-prod-rep2} value=0 ]} {Instance:cluster=euid-prod, container=metrics-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=metrics-server Value:0xc008f48d00} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=metrics-server Value:0xc008f48d60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316238357s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=metrics-server} value=67 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=metrics-server} value=0 ]} {Instance:cluster=euid-prod, container=node-driver-registrar State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=node-driver-registrar Value:0xc008f48dc0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=node-driver-registrar Value:0xc008f48e10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316240147s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=node-driver-registrar} value=264 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=node-driver-registrar} value=0 ]} {Instance:cluster=euid-prod, container=node-exporter State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=node-exporter Value:0xc008f48e90} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=node-exporter Value:0xc008f48ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316242547s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=node-exporter} value=312 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=node-exporter} value=0 ]} {Instance:cluster=euid-prod, container=private-site-data-refresh-prod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=private-site-data-refresh-prod Value:0xc008f48fa0} THRESHOLD:{Var:THRESHOLD 
Labels:cluster=euid-prod, container=private-site-data-refresh-prod Value:0xc008f48f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316245237s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=private-site-data-refresh-prod} value=6 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=private-site-data-refresh-prod} value=0 ]} {Instance:cluster=euid-prod, container=prometheus-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=prometheus-server Value:0xc008f49000} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=prometheus-server Value:0xc008f49050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316247057s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=prometheus-server} value=23 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=prometheus-server} value=0 ]} {Instance:cluster=euid-prod, container=pull-secrets-automation State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=pull-secrets-automation Value:0xc008f490b0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=pull-secrets-automation Value:0xc008f49100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316248977s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=pull-secrets-automation} value=19 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=pull-secrets-automation} value=0 ]} {Instance:cluster=euid-prod, container=pushgateway-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=pushgateway-server Value:0xc008f49160} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=pushgateway-server Value:0xc008f491b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316250697s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=pushgateway-server} value=19 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=pushgateway-server} value=0 ]} {Instance:cluster=euid-prod, container=redis State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=redis Value:0xc008f49210} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=redis Value:0xc008f49260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316252407s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=redis} value=21 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=redis} value=0 ]} {Instance:cluster=euid-prod, container=repo-server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=repo-server Value:0xc008f492e0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=repo-server Value:0xc008f49340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316254207s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=repo-server} value=50 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=repo-server} value=0 ]} {Instance:cluster=euid-prod, container=rotation-master-key-prod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=rotation-master-key-prod Value:0xc008f493a0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=rotation-master-key-prod Value:0xc008f493f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316255967s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=rotation-master-key-prod} 
value=6 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=rotation-master-key-prod} value=0 ]} {Instance:cluster=euid-prod, container=rotation-salts-prod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=rotation-salts-prod Value:0xc008f49450} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=rotation-salts-prod Value:0xc008f494a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316257747s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=rotation-salts-prod} value=6 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=rotation-salts-prod} value=0 ]} {Instance:cluster=euid-prod, container=rotation-site-keys-prod State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=rotation-site-keys-prod Value:0xc008f49500} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=rotation-site-keys-prod Value:0xc008f49550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316259427s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=rotation-site-keys-prod} value=6 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=rotation-site-keys-prod} value=0 ]} {Instance:cluster=euid-prod, container=secret-init State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=secret-init Value:0xc008f495d0} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=secret-init Value:0xc008f49630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316262017s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=secret-init} value=2 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=secret-init} value=0 ]} {Instance:cluster=euid-prod, container=secret-store-automation State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=secret-store-automation Value:0xc008f49690} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=secret-store-automation Value:0xc008f496e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316264647s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=secret-store-automation} value=15 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=secret-store-automation} value=0 ]} {Instance:cluster=euid-prod, container=server State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=server Value:0xc008f49790} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=server Value:0xc008f49750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316266647s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=server} value=46 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=server} value=0 ]} {Instance:cluster=euid-prod, container=volumemodifier State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=volumemodifier Value:0xc008f49870} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=volumemodifier Value:0xc008f49820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316268667s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=volumemodifier} value=67 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=volumemodifier} value=0 ]} {Instance:cluster=euid-prod, container=vpa State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=vpa Value:0xc008f498d0} 
THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=vpa Value:0xc008f49920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316270727s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=vpa} value=129 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=vpa} value=0 ]} {Instance:cluster=euid-prod, container=webhook State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:cluster=euid-prod, container=webhook Value:0xc008f49980} THRESHOLD:{Var:THRESHOLD Labels:cluster=euid-prod, container=webhook Value:0xc008f499d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.316273197s EvaluationString:[ var='QUERY' labels={cluster=euid-prod, container=webhook} value=15 ], [ var='THRESHOLD' labels={cluster=euid-prod, container=webhook} value=0 ]}]" duration=1.758218284s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u264bccn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.319622146Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u23i0lc1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.319513715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=315799 slug=sentio t=2024-05-29T13:44:14.319420426Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u23i0lc1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.319444625Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u23i0lc1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.319345893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=315799 slug=sentio version=3 fingerprint=5cce426ad98f0417 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.319131219Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.318661024s EvaluationString:}]" duration=29.90205ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u1zuzzph-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.319278393Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.319232326Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=459086 slug=metricgamingprd version=22 fingerprint=e614df1c293df98e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.319111676Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.318825154s EvaluationString:}]" duration=20.85318ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u1hjgujb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.318636026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u1hjgujb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.318608696Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.318603317Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.318612491Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=692010 slug=mercariusprod t=2024-05-29T13:44:14.318570768Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.318528784Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u1dhhdvi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.318467134Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u1dhhdvi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.318427494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u1dhhdvi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.318294273Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.318296668Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.318223124Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + Error parsing panelUID for alert annotationruleID2937dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=174675 slug=journalprod t=2024-05-29T13:44:14.318011793Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=88.660473ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u193co9p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.3180073Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.317984116Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=522347 slug=vitkucera + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u193co9p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317890189Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.317641154Z caller=remote_alert_sender.go:94 user=527204 slug=lnrsusinsurancenonprod host=lnrsusinsurancenonprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.227.131:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a353a994-3f5c-4cc7-abad-8aba145938c0 alerts=1 + level=debug ts=2024-05-29T13:44:14.317634192Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0wt49bs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317689297Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0wt49bs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317676126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0uzf3lu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317639446Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.317331309Z caller=client.go:80 msg="creating client for grafana instance" user=747070 addr=dns:///worbee-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0uzf3lu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317481214Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.317283109Z caller=ruler.go:522 msg="tenant is owned by this instance" user=694575 slug=varianprod groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0t5fwfm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317380563Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.31734834Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0t5fwfm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317277122Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0sksji8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317211402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0sksji8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.317139541Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.317110305Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.317103301Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0r34qbq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.316987729Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=e09816446cd80273 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.316970706Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=5.664857ms
+ level=error ts=2024-05-29T13:44:14.316916613Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0r34qbq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.316906708Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.316789904Z caller=ruler.go:522 msg="tenant is owned by this instance" user=723334 slug=umcgdemo groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0kczyjn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.316722697Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:14.316576502Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=494005 slug=viind
+ level=debug ts=2024-05-29T13:44:14.316546302Z caller=ruler.go:522 msg="tenant is owned by this instance" user=494005 slug=viind groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0f7r0ad-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.316463284Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.316268892Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:14.316397501Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=762825 slug=unesco
+ level=debug ts=2024-05-29T13:44:14.316279742Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g4.coreSlave*.usersessions.status.fifa-2023-xone.GaugeUS_gcp-scl_Slave,5)) Query" t=2024-05-29T13:44:14.316197322Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u0902pno-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.316190571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u05xmg6q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.31611176Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=554711 slug=bekci instance= t=2024-05-29T13:44:14.316089725Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=207c1d148cb8266d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.315952588Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g4.coreSlave*.usersessions.status.fifa-2023-xone.GaugeUS_gcp-scl_Slave,5)) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0b186dac0} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc0b186dac8} Threshold:{Var:Threshold Labels: Value:0xc0b186db70} compare:{Var:compare Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g4.coreSlave*.usersessions.status.fifa-2023-xone.GaugeUS_gcp-scl_Slave,5)) Query Value:0xc0b186dba0} sum:{Var:sum Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g4.coreSlave*.usersessions.status.fifa-2023-xone.GaugeUS_gcp-scl_Slave,5)) Query Value:0xc0b186da28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.31564346s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=100 ], [ var='Threshold' labels={} value=-20 ], [ var='compare' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g4.coreSlave*.usersessions.status.fifa-2023-xone.GaugeUS_gcp-scl_Slave,5)) Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.fifa-2023-common-g4.coreSlave*.usersessions.status.fifa-2023-xone.GaugeUS_gcp-scl_Slave,5)) Query} value=0 ]}]" duration=100.224317ms
+ level=debug ts=2024-05-29T13:44:14.316000298Z caller=remote_instance_store.go:51 user=530405 slug=zetetic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u05xmg6q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.315963599Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=530405 slug=zetetic t=2024-05-29T13:44:14.315963553Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=530405 slug=zetetic instance="chain=Kusama, pool=Watermelon" t=2024-05-29T13:44:14.315940924Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u04sybq7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.315859558Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=530405 slug=zetetic t=2024-05-29T13:44:14.315884117Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u04sybq7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.315797467Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-u04sybq7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.315689456Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.315503792Z caller=ruler.go:522 msg="tenant is owned by this instance" user=605625 slug=ucsautomation groups=0
+ level=debug ts=2024-05-29T13:44:14.315486992Z caller=ruler.go:522 msg="tenant is owned by this instance" user=506824 slug=twl groups=0
+ level=debug ts=2024-05-29T13:44:14.315422991Z caller=ruler.go:522 msg="tenant is owned by this instance" user=521042 slug=unibussgiro groups=3
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzwqpfqt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.315199301Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.315112814Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.315034488Z caller=client.go:80 msg="creating client for grafana instance" user=561759 addr=dns:///wastedboys-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:14.314937447Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzwqpfqt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.3151102Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.315033719Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.315035027Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.314940033Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzsi1e7y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314916408Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzsi1e7y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314896988Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.314815186Z caller=client.go:80 msg="creating client for grafana instance" user=708339 addr=dns:///wair-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzn0zgqv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314656015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzn0zgqv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314639245Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzlzhbwy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314527754Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzlzhbwy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314492784Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzlzhbwy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314449953Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzlzhbwy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314403613Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.314472304Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112387 slug=lucidhq instance="ClientId=252450725677, DomainName=logs" t=2024-05-29T13:44:14.314432865Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.314388606Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=112387 slug=lucidhq version=3 fingerprint=28887ba7fdcf3801 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.314323093Z level=debug msg="Alert rule evaluated" results="[{Instance:ClientId=252450725677, DomainName=logs State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:ClientId=252450725677, DomainName=logs Value:0xc00a779550} D:{Var:D Labels:ClientId=252450725677, DomainName=logs Value:0xc00a779508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.314001407s EvaluationString:[ var='C' labels={ClientId=252450725677, DomainName=logs} value=14.2 ], [ var='D' labels={ClientId=252450725677, DomainName=logs} value=0 ]}]" duration=80.316098ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzkw9sj2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.314285822Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.314277403Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzkw9sj2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.31411352Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=656459 slug=activeport t=2024-05-29T13:44:14.314011175Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzfs48qn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.313972528Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzfs48qn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.313944088Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.313795376Z caller=ruler.go:522 msg="tenant is owned by this instance" user=826749 slug=truebv groups=2
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzfk0ral-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.313753616Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=55491 slug=demandbase instance="datasource_uid=000000384, ref_id=A" t=2024-05-29T13:44:14.313725791Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.313543909Z caller=remote_instance_store.go:51 user=497819 slug=fornybar msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tzettlhd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.313507063Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.313472864Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=tor, instance=tor, job=node_exporter_yggdrasil, name=utetid.service, notify=critical, state=failed, type=simple" t=2024-05-29T13:44:14.313459101Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tza31lsy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.313303941Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=tor, instance=tor, job=node_exporter_yggdrasil, name=smgfakt.service, notify=critical, state=failed, type=simple" t=2024-05-29T13:44:14.313363456Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=tor, instance=tor, job=node_exporter_yggdrasil, name=prometheus-scraper.service, notify=default, state=failed, type=simple" t=2024-05-29T13:44:14.313263798Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.313182524Z caller=remote_instance_store.go:51 user=477402 slug=infleqtion msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tz8nofa7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.31317492Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tz8nofa7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.313097869Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.313024438Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=tor, instance=tor, job=node_exporter_yggdrasil, name=ask-raw-nucs.service, notify=critical, state=failed, type=simple" t=2024-05-29T13:44:14.312997735Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tz7cs0pd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312824056Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=tor, instance=tor, job=node_exporter_yggdrasil, name=ask-bronze-pointconnect-api.service, notify=critical, state=failed, type=simple" t=2024-05-29T13:44:14.312841535Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tz5krzkk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312701795Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=panda02, instance=panda02, job=node_exporter_yggdrasil, name=prometheus-scraper.service, notify=default, state=failed, type=simple" t=2024-05-29T13:44:14.312649788Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyjx1or9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312584124Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=panda01, instance=panda01, job=node_exporter_yggdrasil, name=prometheus-scraper.service, notify=default, state=failed, type=simple" t=2024-05-29T13:44:14.312498247Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyjx1or9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312482323Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=497819 slug=fornybar instance="agent_hostname=az0130vm0001, instance=az0130vm0001, job=node_exporter_yggdrasil, name=aggregat-tilgjengelighet.service, notify=default, state=failed, type=simple" t=2024-05-29T13:44:14.312417251Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyjx1or9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312468063Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyfw2xuj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312432402Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.312424859Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyfw2xuj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312410782Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=497819 slug=fornybar t=2024-05-29T13:44:14.312291923Z level=debug msg="State manager processing evaluation results" resultCount=10
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyfw2xuj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312281531Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyc0gwby-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.31223023Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.312243331Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tyc0gwby-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312117549Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=166705 slug=crossnokaye version=5 fingerprint=7e670af8fcec2e3b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.312088902Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.311847159s EvaluationString:}]" duration=16.874524ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ty6rflv3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.312081009Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.311934927Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.311896958Z caller=client.go:80 msg="creating client for grafana instance" user=757842 addr=dns:///vlegeltech-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:14.311860758Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=537859 slug=tourtanops
+ level=debug ts=2024-05-29T13:44:14.311890017Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=iam-beta-120220127085023821300000015" t=2024-05-29T13:44:14.311732398Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.311634526Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=60.696522ms
+ logger=ngalert.scheduler user=114492 slug=railsbank version=2 fingerprint=f8619a14980eed4a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.311586885Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=iam-beta-120220127085023821300000015 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=iam-beta-120220127085023821300000015 Value:0xc02e817580} C:{Var:C Labels:DBInstanceIdentifier=iam-beta-120220127085023821300000015 Value:0xc02e817588}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.311234315s EvaluationString:[ var='B' labels={DBInstanceIdentifier=iam-beta-120220127085023821300000015} value=6.644076544e+09 ], [ var='C' labels={DBInstanceIdentifier=iam-beta-120220127085023821300000015} value=0 ]}]" duration=135.370038ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ty2rsdvt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.311653304Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txymp3hc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.311458422Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txymp3hc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.311427952Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug component=discovery ts=2024-05-29T13:44:14.311377433Z caller=retry.go:58 user=398905 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=3
+ level=debug ts=2024-05-29T13:44:14.311254168Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.311249659Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.311117238Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.311075953Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.311084683Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txy4t58c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310945307Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:14.310777647Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=728121 slug=tiental
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txwji1jk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310737745Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txwji1jk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310645784Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txwji1jk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310635844Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txuxyjvy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310602173Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.310531409Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txuxyjvy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310439492Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txs1jywo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310407431Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txs1jywo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310341321Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txs1jywo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.31026974Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txniqtn1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310117268Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txniqtn1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.310092298Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txmirqhj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.309904166Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txgmfwhx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.309784345Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.309714637Z caller=ruler.go:522 msg="tenant is owned by this instance" user=505213 slug=timoschiller groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txduwbw3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.309687494Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txduwbw3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.309634013Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-txbetmcx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.309511582Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tx4awiia-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30932207Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.309156682Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tx4awiia-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.309168219Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tx2fntm2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.309105488Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=516613 slug=blackrocktp t=2024-05-29T13:44:14.309051715Z level=debug msg="Saving alert states done" count=83 max_state_save_concurrency=1 duration=1.66306064s
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.308992847Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.309032079Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.308983182Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.308980577Z caller=remote_alert_sender.go:94 user=49099 slug=falconx host=falconx-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.80.40:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a3ef1340-fd19-45e8-be63-ba0d87eaee11 alerts=1
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.308974471Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.308946972Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=245291 slug=pismo version=21 fingerprint=ca27669a7fd2d02e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.308892075Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.308572561s EvaluationString:}]" duration=210.05302ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tx1sdzkk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.308863586Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.30885031Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.308860511Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tx1sdzkk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.308804015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tx1sdzkk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.308782175Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.308721232Z caller=remote_instance_store.go:51 user=311292 slug=rampeu msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twrgxo9m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.308747474Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twrgxo9m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.308737444Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=550657 slug=garrigues t=2024-05-29T13:44:14.308684124Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.308665501Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.30864717Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=700783 slug=gsgmedia t=2024-05-29T13:44:14.308476993Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.228016ms
+ level=debug ts=2024-05-29T13:44:14.30843451Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.308201649Z caller=remote_instance_store.go:51 user=401509 slug=redefined msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twiaw0pa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30829546Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twcza579-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.308061567Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:14.308028921Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.730445ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twcza579-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307963726Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.307965582Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twbnge2x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307899376Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.307761965Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.307756699Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twbnge2x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307777234Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-twbnge2x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307743404Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvyjh3a7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307641643Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.307558508Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.307600222Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.307511917Z caller=client.go:80 msg="creating client for grafana instance" user=666079 addr=dns:///ventiro-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvtu4pue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307484731Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:14.307471416Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=686384 slug=theforum
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvtu4pue-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307435101Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvthiblo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30737932Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:14.307432816Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=559154 slug=techexperts
+ level=debug ts=2024-05-29T13:44:14.307295167Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvthiblo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30730728Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvs5r3q6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307217919Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.307216362Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvs5r3q6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307174678Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.307047023Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=384712 slug=nearinc instance="metric.label.instance_name=testnet-dumper-mig-us-central1-qfv1, resource.label.instance_id=8254252561270548755, resource.label.project_id=near-core, resource.label.zone=us-central1-c, resource.type=gce_instance" t=2024-05-29T13:44:14.307039615Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=384712 slug=nearinc instance="metric.label.instance_name=testnet-dumper-mig-us-central1-qfv1, resource.label.instance_id=8254252561270548755, resource.label.project_id=near-core, resource.label.zone=us-central1-c, resource.type=gce_instance" t=2024-05-29T13:44:14.307028072Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvgoiwcm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.307043157Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:14.306911811Z caller=client.go:80 msg="creating client for grafana instance" user=640011 addr=dns:///variandev-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=384712 slug=nearinc instance="metric.label.instance_name=testnet-mig-us-central1-3rc7, resource.label.instance_id=8178080435807691907, resource.label.project_id=near-core, resource.label.zone=us-central1-c, resource.type=gce_instance" t=2024-05-29T13:44:14.306935683Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.306729209Z caller=client.go:80 msg="creating client for grafana instance" user=791033 addr=dns:///vanoord-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=384712 slug=nearinc instance="metric.label.instance_name=testnet-mig-europe-west1-gcnw, resource.label.instance_id=1080637899606828184, resource.label.project_id=near-core, resource.label.zone=europe-west1-c, resource.type=gce_instance" t=2024-05-29T13:44:14.306894383Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=384712 slug=nearinc instance="metric.label.instance_name=testnet-mig-asia-east1-x3s9, resource.label.instance_id=8778584724271768725, resource.label.project_id=near-core, resource.label.zone=asia-east1-b, resource.type=gce_instance" t=2024-05-29T13:44:14.306859665Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=384712 slug=nearinc instance="metric.label.instance_name=testnet-mig-asia-east1-x3s9, resource.label.instance_id=8778584724271768725, resource.label.project_id=near-core, resource.label.zone=asia-east1-b, resource.type=gce_instance" t=2024-05-29T13:44:14.306845293Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.306569429Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.306720656Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvbjxsei-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.306719704Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvbjxsei-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.306681203Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tvbjxsei-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.306652543Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.306596165Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tv544vow-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.306583912Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.306495492Z caller=remote_alert_sender.go:94 user=158536 slug=clearsaleantifraude host=clearsaleantifraude-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.207.35:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e5eb2b63-d55c-4eb4-95cc-fb4411d133b9 alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tv544vow-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.306459801Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tuqxfwn1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.306229149Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.306092179Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.306091458Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.306053595Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.306007519Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ Error parsing panelUID for alert annotationruleID313dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=235691 slug=om2 t=2024-05-29T13:44:14.305955988Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=39.12499ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tuhznaw6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305918475Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tuhznaw6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305872495Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:14.3058164Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=550580 slug=stienen
+ level=debug ts=2024-05-29T13:44:14.3057947Z caller=ruler.go:522 msg="tenant is owned by this instance" user=550580 slug=stienen groups=0
+ level=debug ts=2024-05-29T13:44:14.305739801Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:14.305772726Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=46.298297ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tueocu2m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305627712Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.305592998Z caller=client.go:80 msg="creating client for grafana instance" user=521042 addr=dns:///unibussgiro-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager.persist user=318220 slug=deepalert t=2024-05-29T13:44:14.305491736Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=info component=discovery ts=2024-05-29T13:44:14.305554398Z caller=client.go:80 msg="creating client for grafana instance" user=762825 addr=dns:///unesco-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tudun600-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305509501Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:14.305467997Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=528406 slug=thukral
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tudun600-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305449901Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=318220 slug=deepalert instance="datasource_uid=UA-lPq_nz, ref_id=A" t=2024-05-29T13:44:14.305461432Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.304218285Z caller=ruler.go:522 msg="tenant is owned by this instance" user=603338 slug=thomassirus groups=0
+ level=debug ts=2024-05-29T13:44:14.304226185Z caller=ruler.go:522 msg="tenant is owned by this instance" user=528406 slug=thukral groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tuc0ar62-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30539748Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu71dih8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305264229Z level=debug msg="Keeping state"
state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.305078194Z caller=client.go:80 msg="creating client for grafana instance" user=523289 addr=dns:///ueftipafree-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu71dih8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305175178Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.305053993Z caller=ruler.go:522 msg="tenant is owned by this instance" user=491178 slug=timoschroeder00 groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu71dih8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305145587Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu613a10-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305094797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu613a10-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305068597Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.305040993Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=926805 slug=timeadev + level=info component=discovery ts=2024-05-29T13:44:14.304996093Z caller=client.go:80 msg="creating client for grafana instance" user=699442 addr=dns:///udcontrol-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu613a10-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.305022806Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.304959892Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=670894 slug=thecasystems + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu41jbwx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304882575Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.304632889Z caller=client.go:80 msg="creating client for grafana instance" user=605625 addr=dns:///ucsautomation-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu2ak9as-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304705313Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tu2ak9as-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304599022Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ttthmzm0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304558891Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.30454183Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ttthmzm0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30443916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=196413 slug=form3production instance="Region=eu-west-2, ServiceLimit=DB instances, ServiceName=RDS" t=2024-05-29T13:44:14.304436557Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tttbdwjg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.304367249Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.304376075Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.250206ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tttbdwjg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304327859Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tttbdwjg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304258238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.304156942Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ttpxcvpf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304167957Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ttpxcvpf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.304028916Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.303964183Z caller=client.go:80 msg="creating client for grafana instance" user=639083 addr=dns:///ucaremedical-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ttg4up87-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303919305Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ttg4up87-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303820674Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.303817582Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=518166 slug=techwizarddd + level=info ts=2024-05-29T13:44:14.303817785Z caller=remote_alert_sender.go:94 user=537072 slug=devbitvavo host=devbitvavo-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.64.41:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f1affa47-2cf9-4b31-bef4-f9c8825c98b7 alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ttbbx5xi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303668712Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tt5ki2za-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303627272Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.302970169Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tt5ki2za-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303532831Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=527204 slug=lnrsusinsurancenonprod instance="datasource_uid=llb99-bVk, ref_id=A" t=2024-05-29T13:44:14.303528105Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=527204 slug=lnrsusinsurancenonprod instance="datasource_uid=llb99-bVk, ref_id=A" t=2024-05-29T13:44:14.303475755Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.303464625Z caller=remote_instance_store.go:51 user=319327 slug=cvi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tt1070am-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30347735Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=319327 slug=cvi t=2024-05-29T13:44:14.303315112Z level=debug msg="State manager processing evaluation 
results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tt0i3bu1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303274518Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tt0i3bu1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303206138Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tt0i3bu1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303098646Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tswn6y84-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303069336Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tswn6y84-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303017406Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tswn6y84-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.303006746Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.302972174Z caller=ruler.go:522 msg="tenant is owned by this instance" user=635966 slug=swegon groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tswn6y84-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302977995Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tswn6y84-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302955375Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tslev9tk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302923685Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.302682842Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tsjh5c73-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302656212Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.302713433Z caller=remote_instance_store.go:51 user=554491 slug=safeskyindustries msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tsjh5c73-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302618602Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.302576712Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tsj2mpnj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30242879Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.302306752Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ts9k49ym-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302328269Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.302282485Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:14.302268767Z caller=client.go:80 msg="creating client for grafana instance" user=706233 addr=dns:///tsccarexs-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:14.302250067Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=540403 slug=teufelaudio + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ts862xsq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302151007Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ts862xsq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302120786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ts862xsq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.302011865Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=438855 slug=teckresources t=2024-05-29T13:44:14.301955398Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.239622ms + level=debug ts=2024-05-29T13:44:14.301926061Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ts83g3yw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.301793593Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.301897479Z caller=remote_instance_store.go:51 user=118996 slug=awsswiftproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=456850 slug=juniz t=2024-05-29T13:44:14.301841388Z level=debug msg="State manager processing evaluation results" resultCount=6 + level=debug 
ts=2024-05-29T13:44:14.301796747Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.301807311Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=139426 slug=nmsalerts instance= t=2024-05-29T13:44:14.301795038Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=118996 slug=awsswiftproduction instance= t=2024-05-29T13:44:14.301832206Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.301753603Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.301735711Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.301676867Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=438185 slug=nodeinfra t=2024-05-29T13:44:14.301695836Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=438185 slug=nodeinfra instance="chain=SUI, cloud=PhoenixNAP, deployment=production, instance=131.153.159.29:9184, job=prod-SUI-ssfn, network=mainnet, node_name=prod_SUI_ssfn_sgp_1, region=singapore, servicetype=ssfn" t=2024-05-29T13:44:14.301680496Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ts83g3yw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.301549011Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.301500709Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.30150843Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:14.301519545Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="chain=SUI" + level=warn ts=2024-05-29T13:44:14.301423259Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=711280 slug=theatlas + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ts3l4da6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.301360339Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.301313886Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.301311372Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.301268258Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trzsj2bx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.301246687Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.301170381Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trqc8hoo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.301147006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trqc8hoo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.301003515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trm6s8oh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.300897114Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-trm3lr6b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.300699272Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trl6oh3h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.300588581Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trl6oh3h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30055967Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.30047895Z caller=ruler.go:522 msg="tenant is owned by this instance" user=504797 slug=streamcircle groups=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trl6oh3h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.30051625Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.299859144Z caller=client.go:80 msg="creating client for grafana instance" user=605126 addr=dns:///torsion-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trl6oh3h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.300413499Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trgn3kw5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.300208327Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-trgn3kw5-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.300156976Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.299823794Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.300060233Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tre2vnzg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.300017135Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tr4womgy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.299886294Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.299667442Z caller=client.go:80 msg="creating client for grafana instance" user=666188 addr=dns:///toretangen-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=354676 slug=gridmarketenergy t=2024-05-29T13:44:14.299675667Z level=debug msg="Saving alert states" count=1580 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tr2lkmlq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.299616561Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious" t=2024-05-29T13:44:14.299599985Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious" t=2024-05-29T13:44:14.299567692Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tr2lkmlq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2995514Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.299551275Z level=debug msg="State manager 
processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=dd49cdc263f4befe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.299479368Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.299310912s EvaluationString:}]" duration=41.873621ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqwrlilb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.299492579Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.299489212Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299410155Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.299355785Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqwrlilb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.299323878Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqthwj7t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.299259767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299262785Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299238355Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.299181518Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.299172901Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.299151889Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299114805Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299106942Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299066072Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299055861Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299029671Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299019626Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqthwj7t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.299044035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.299005062Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.299030769Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29898889Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqthwj7t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.299029555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298961438Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298956361Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.298929532Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298917485Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.298924153Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298896177Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298877852Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298870518Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298863783Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:14.298883771Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqnu5zgw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298897453Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqnu5zgw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298855873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=63636 slug=streamelements t=2024-05-29T13:44:14.298861744Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=491157 slug=prd01wr instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298860441Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=491157 slug=prd01wr version=4 fingerprint=6559500f261e068a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.29875996Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.298465428s EvaluationString:}]" duration=22.65253ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298824427Z 
level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=63636 slug=streamelements instance= t=2024-05-29T13:44:14.298844027Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.298750834Z caller=client.go:80 msg="creating client for grafana instance" user=604098 addr=dns:///toblu96-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298799515Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.298760643Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.298753987Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298740085Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqj2ex4q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298672501Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqj2ex4q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298616451Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298693322Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298683491Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqj2ex4q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29854959Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298659252Z level=debug msg="Setting next state" handler=resultNoData + level=warn ts=2024-05-29T13:44:14.298659633Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=541531 slug=temesvari + 
logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298641971Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.298622032Z caller=ruler.go:522 msg="tenant is owned by this instance" user=541531 slug=temesvari groups=0 + level=info component=discovery ts=2024-05-29T13:44:14.298539632Z caller=client.go:80 msg="creating client for grafana instance" user=713023 addr=dns:///titovevgev-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29860128Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.298473031Z caller=ruler.go:522 msg="tenant is owned by this instance" user=622381 slug=tenabledevops groups=0 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298572622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298566792Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.29846906Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298417772Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298393482Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298371385Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tqc57h0r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298364518Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298349602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298340757Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298272699Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298258724Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298232946Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298199312Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298186677Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298179948Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.298098728Z caller=ruler.go:522 msg="tenant is owned by this instance" user=511188 slug=tappelt groups=0 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298144896Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298116402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298095132Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298080865Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298071359Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.298050287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tq60qj7w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298091215Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tq60qj7w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298063035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297987976Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tq60qj7w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.298040585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tq2i8bz5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297999434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297892154Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29783108Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:14.297855925Z caller=client.go:80 msg="creating client for grafana instance" user=505213 addr=dns:///timoschiller-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297787571Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=273717 slug=seventyfivef t=2024-05-29T13:44:14.297853966Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=55.086765ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297749577Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.297860097Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29774499Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tpuueb63-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297797092Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297698062Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.297686968Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297656147Z level=debug msg="Setting next state" handler=resultNoData + level=warn ts=2024-05-29T13:44:14.297669123Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=557897 slug=storebrandmaverick + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297649399Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tpuueb63-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297687491Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29764448Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297629734Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297618272Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297597512Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297566221Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297560768Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297545189Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tprmjn5l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29755343Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297516617Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tprmjn5l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297493779Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297502598Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.297449621Z caller=ruler.go:522 msg="tenant is owned by this instance" user=557897 slug=storebrandmaverick groups=0 + level=debug ts=2024-05-29T13:44:14.297464103Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297483358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297468884Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297457343Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297367106Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297326729Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tporhi2d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297355378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tporhi2d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297332017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297250339Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297193824Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.297216533Z 
caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297164542Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297152809Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tpnrzvr9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297156116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297144628Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297106129Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297090154Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.297084859Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297080416Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.297027475Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tpnrm151-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.297021324Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296968878Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296961385Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296906254Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.296848436Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29681186Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tpe9ultb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296769652Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296741191Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296705432Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296687361Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tp60dct8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296674411Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296629499Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.296684095Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296620604Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296607632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296570699Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tp60dct8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296537899Z level=debug msg="Setting 
next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296547665Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296521756Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.296002782Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296501088Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.296526172Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296459121Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296435456Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tp5no08e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296422688Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296384064Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tp5no08e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296362467Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296350632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tp21mpsq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296264766Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296227822Z level=debug msg="Setting next 
state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.296226878Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.296209073Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.296149644Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296205851Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tp21mpsq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296209506Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296144589Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296115622Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29609518Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.296067549Z caller=remote_instance_store.go:51 user=108112 slug=btctrader msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tp0u89er-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.296000504Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.296029152Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.296025838Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295990972Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295977629Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.29596095Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295938813Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295920233Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.295884515Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295901611Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.295851938Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.29584326Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.2958384Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.295817482Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=57.948071ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295829411Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.295779109Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295747319Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-to5tvq32-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29566679Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295721919Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295713704Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-to5tvq32-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29560838Z level=debug msg="Setting next 
state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.295632768Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=806229 slug=simplisafe instance="endpoint=/v1/subscriptions/{sid}/location" t=2024-05-29T13:44:14.295594175Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.295669446Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295622371Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="endpoint=/v1/subscriptions/{sid}" t=2024-05-29T13:44:14.295559124Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=ca44a25b73c55abd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.295614935Z level=error msg="Failed to evaluate rule" error="failed to build query 'Crowd Motor': data source not found" duration=5.711071ms + level=error ts=2024-05-29T13:44:14.295574112Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'Crowd Motor': data source not found" + logger=ngalert.state.manager user=806229 slug=simplisafe instance="endpoint=/v1/cameras?sid" t=2024-05-29T13:44:14.295533955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295541244Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.295567491Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295531213Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295519994Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=806229 slug=simplisafe instance="endpoint=/v1/cameras/{uuid}?sid" t=2024-05-29T13:44:14.295461774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295509111Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-to5tvq32-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.295549279Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295486158Z level=debug msg="Execution 
no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295452Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-to0umxca-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.295489678Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295429434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295409418Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295399685Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295366625Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-to0umxca-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.295402118Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-to0umxca-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.295371217Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295286982Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=warn ts=2024-05-29T13:44:14.295343502Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=695279 slug=secforhire + level=debug ts=2024-05-29T13:44:14.295327128Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tnz0tzl6-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.295309517Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295216617Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.295174419Z caller=remote_instance_store.go:51 user=151289 slug=everflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295128058Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295109453Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29509691Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.295140809Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:14.2951583Z caller=client.go:80 msg="creating client for grafana instance" user=528406 addr=dns:///thukral-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295089681Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295068328Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tnro4buz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.295057864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295016618Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.295004864Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.295020068Z caller=remote_instance_store.go:51 user=407477 slug=inventa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294994771Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-tnro4buz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.294987513Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tnro4buz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.294943663Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.294985298Z caller=client.go:80 msg="creating client for grafana instance" user=519668 addr=dns:///therealnoz-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294921128Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29489708Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.294985082Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294890152Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294876716Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294869091Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:14.294937767Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=109452 slug=deltarisk instance="datasource_uid=grafanacloud-prom, ref_id=B" t=2024-05-29T13:44:14.294858691Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tnro4buz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.294740051Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294771515Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.294706279Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294707121Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294695563Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294676271Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294598562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294580678Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=342039 slug=criblcloud t=2024-05-29T13:44:14.294572858Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=68.734028ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294529643Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294464438Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294456824Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294433935Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294414465Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294361761Z level=debug msg="Setting next state" handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:14.294431593Z caller=client.go:80 msg="creating client for grafana instance" user=516268 addr=dns:///theok-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294340345Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294329518Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294264307Z level=debug msg="Execution no data state is Normal" 
handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tnbu1wka-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.294426548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294245518Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294235334Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294227565Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294182125Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294143865Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.294101656Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29409062Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tnbu1wka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.294224105Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tnbu1wka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.294187105Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.294185291Z caller=ruler.go:522 msg="tenant is owned by this instance" user=757560 slug=samplestack groups=0 + logger=ngalert.scheduler user=344017 slug=descript version=3 fingerprint=a8c8496f6e7d7bf8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.294116158Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.project_id=production-273614, resource.type=k8s_container State:Normal Error: 
Results:map[] Values:map[Reduce:{Var:Reduce Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc00edd5ad8} Threshold:{Var:Threshold Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc00edd5b40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.293737803s EvaluationString:[ var='Reduce' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0.0032013170125798096 ], [ var='Threshold' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0 ]}]" duration=487.633922ms + level=debug ts=2024-05-29T13:44:14.294037183Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293994118Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn73gnc4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.294080324Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293970951Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293962055Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:14.293945788Z caller=client.go:80 msg="creating client for grafana instance" user=670894 addr=dns:///thecasystems-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.293927788Z caller=ruler.go:522 msg="tenant is owned by this instance" user=520651 slug=robertogiacomozzi groups=8 + level=debug ts=2024-05-29T13:44:14.293981664Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293798859Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.293904384Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=49099 slug=falconx instance= t=2024-05-29T13:44:14.29370402Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293787825Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-tn5z5330-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293879972Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.293841017Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293767813Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293747925Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn5s2l0y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293803671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293724856Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293701958Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293670266Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn5s2l0y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293752931Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn5s2l0y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29373161Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293631091Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293590407Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn5s2l0y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29369284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293562561Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn4afc6i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29366139Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.293607785Z caller=client.go:80 msg="creating client for grafana instance" user=711280 addr=dns:///theatlas-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29353464Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293510277Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn4afc6i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293611469Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn4afc6i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293571329Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293486635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn18vipx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.293499508Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293478209Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn18vipx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293483818Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn18vipx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293447837Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn18vipx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293425947Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293460005Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tn18vipx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293381927Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293412141Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.29310278Z caller=ruler.go:522 msg="tenant is owned by this instance" user=525826 slug=sqills groups=0 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293379747Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmxmj0ol-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.293333386Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmxmj0ol-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293312086Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.292731877Z caller=client.go:80 msg="creating client for grafana instance" user=540403 addr=dns:///teufelaudio-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293347895Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293322103Z level=debug msg="Setting next state" handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:14.292592976Z caller=client.go:80 msg="creating client for grafana instance" user=675436 addr=dns:///testamento-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293216644Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29318398Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.291644967Z caller=client.go:80 msg="creating client for grafana instance" user=518166 addr=dns:///techwizarddd-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:14.291618666Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=610036 slug=raulvillamor + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293154329Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.291586066Z caller=ruler.go:522 msg="tenant is owned by this instance" user=603024 slug=quie groups=0 + level=debug ts=2024-05-29T13:44:14.291286463Z caller=ruler.go:522 msg="tenant is owned by this instance" user=505315 slug=senseworks groups=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293124838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293107745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmwckhmz-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293190675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293092269Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.291278263Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=713429 slug=rb2wovar + level=debug ts=2024-05-29T13:44:14.291260763Z caller=ruler.go:522 msg="tenant is owned by this instance" user=713429 slug=rb2wovar groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmwckhmz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293138944Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.291035461Z caller=client.go:80 msg="creating client for grafana instance" user=652727 addr=dns:///symfonia-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:14.29098586Z caller=client.go:80 msg="creating client for grafana instance" user=505289 addr=dns:///swisnl-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293075762Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293067631Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.293079508Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:14.290824759Z caller=client.go:80 msg="creating client for grafana instance" user=739272 addr=dns:///svprojects-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:14.290714058Z caller=client.go:80 msg="creating client for grafana instance" user=606339 addr=dns:///sunrock-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmwckhmz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.293027543Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293028898Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.293017364Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292947515Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292903955Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.290612957Z caller=ruler.go:522 msg="tenant is owned by this instance" user=733505 slug=smlogistik groups=0 + level=info component=discovery ts=2024-05-29T13:44:14.290580056Z caller=client.go:80 msg="creating client for grafana instance" user=766256 addr=dns:///strnull-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.292967016Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=55.980995ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292882529Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmqaujjo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.292986053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292873017Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292836535Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.290515256Z caller=client.go:80 msg="creating client for grafana instance" user=557897 addr=dns:///storebrandmaverick-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmqaujjo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.292892082Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.290491656Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=647053 slug=spyder + level=debug ts=2024-05-29T13:44:14.290467055Z caller=ruler.go:522 msg="tenant is owned by this instance" user=647053 slug=spyder groups=0 + level=debug ts=2024-05-29T13:44:14.290462455Z caller=ruler.go:522 
msg="tenant is owned by this instance" user=665264 slug=simplisticlabs groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmqaujjo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.292824701Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.290258153Z caller=ruler.go:522 msg="tenant is owned by this instance" user=604462 slug=sowatec groups=0 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292778122Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292754582Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug component=discovery ts=2024-05-29T13:44:14.289651348Z caller=retry.go:58 user=307450 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=3 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmp8hh0d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29278501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tmp8hh0d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29275411Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292665366Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.292684149Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292634767Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292592396Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292582972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292516131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292443014Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29237633Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.292339908Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292217355Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.292207034Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292199115Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292158029Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292147831Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292137662Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.292090531Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlvd78ir-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.292100613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlvd78ir-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.292070323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.292071477Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.292038897Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.291975842Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlvd78ir-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291986632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlvd78ir-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291975672Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291983442Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.291984081Z caller=remote_instance_store.go:51 user=185895 slug=gradle msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=a1685b47c8642bdc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.291686835Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-sjc,5)) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc056930b48} Threshold:{Var:Threshold Labels: Value:0xc056930b70} compare:{Var:compare Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-sjc,5)) Query Value:0xc056930ba0} sum:{Var:sum Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-sjc,5)) Query Value:0xc056930bc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.290573069s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-sjc,5)) Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.plantsvszombies-gw3-{ps4}.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYER_SLOTS_CLIENT_SERVER_DEDICATED_*-sjc,5)) Query} value=0 ]}]" duration=53.381116ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291935877Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 
slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291902617Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tljnz37f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291878761Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.291819509Z caller=remote_instance_store.go:51 user=401509 slug=redefined msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tljnz37f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.29182092Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.291799706Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291768712Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291758859Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291738707Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.29170148Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291726639Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291690779Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291683903Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.29165401Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291666557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlh3uxjt-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291692689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlh3uxjt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291662399Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291658835Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlh3uxjt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291639119Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291627268Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291606397Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tldj4zf4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291608788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tldj4zf4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291585268Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291518544Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tldj4zf4-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291534818Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291484671Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291462698Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291430579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291422591Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.29141441Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291404567Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291394636Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291382257Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.291353886Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev96.gradle.org, instance=dev96.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.291426353Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tld420bb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291403486Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291279083Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291274054Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291259652Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev96.gradle.org, instance=dev96.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.291286029Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291177723Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291149226Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tld420bb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291206854Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291137264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291101254Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlb5eo40-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291118153Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlb5eo40-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291078673Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlb5eo40-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.291055843Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.291045769Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev95.gradle.org, 
instance=dev95.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.290988635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tlakjk58-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290965872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290992604Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290961319Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.290528143Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290943794Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290910777Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkzke5wl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290746549Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290877263Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev95.gradle.org, instance=dev95.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.290833409Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.290753303Z caller=remote_instance_store.go:51 user=487988 slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkzke5wl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290701589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290836214Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkzke5wl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290658948Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290800466Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290792909Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290770366Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290746184Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.290566956Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev94.gradle.org, instance=dev94.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.290631956Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290641901Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev94.gradle.org, instance=dev94.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.290622639Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290599434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290578467Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkqxg7jf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290497657Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkoajfqd-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290471677Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290548742Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkoajfqd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290381706Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290519747Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290507299Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290497787Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290474092Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290454714Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290389283Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290376096Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290267484Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev93.gradle.org, instance=dev93.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.290314616Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkfgzp9s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290311075Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290256402Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290246719Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290239374Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkfgzp9s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290243414Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290232053Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkfgzp9s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290226344Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290197204Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290141074Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.290154513Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290038771Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290030265Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.290006539Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289987994Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.290112545Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=537072 slug=devbitvavo t=2024-05-29T13:44:14.29009441Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289980243Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkdaq9hp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290054692Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkdaq9hp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.290027222Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289957776Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tkbzycc3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289986712Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289821719Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289805777Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28979682Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289738277Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289664528Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tk68k3oh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.28978866Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289568419Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tk68k3oh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289764329Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289547764Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager.persist user=557231 slug=lnrsusinsuranceprod t=2024-05-29T13:44:14.289664163Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.647063ms
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289500027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289481044Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tk68k3oh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289667068Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tk63srum-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289637078Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.2893943Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tk63srum-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289615158Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.289642286Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289349323Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.289627665Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289340827Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289296768Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.scheduler user=698103 slug=vericast version=54 fingerprint=73f32a4a03cf9a64 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.289538713Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.289224872s EvaluationString:}]" duration=74.181258ms
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289271494Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289256771Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev89.gradle.org, instance=dev89.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.289543051Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289249047Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289217901Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289165247Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289154193Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.28937727Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289081081Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289054599Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjnvsnxx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289286014Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.289026568Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288983506Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288956325Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.289207354Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.289186502Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288894587Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjnvsnxx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289218694Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.289181425Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev88.gradle.org, instance=dev88.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.289213003Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288863276Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.28919478Z caller=remote_image_capturer.go:54 user=201644 slug=thoughtspot rule_org_id=1 rule_uid=jC9iiYKVz dashboard=s7TH-EUC1 panel=18 msg="rendering alert image with grafana"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288847418Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288839661Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288829098Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288817761Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288791572Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288772456Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288717712Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288710131Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjj8fv89-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.289081672Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288672187Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288664741Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.288889383Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288658161Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=201644 slug=thoughtspot instance= t=2024-05-29T13:44:14.289051998Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+ level=debug ts=2024-05-29T13:44:14.288987529Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev88.gradle.org, instance=dev88.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.289024481Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjj8fv89-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.288930511Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288607844Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288601263Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.288822712Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev87.gradle.org, instance=dev87.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.288916227Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.288790547Z caller=grafana.go:247 user=557231 slug=lnrsusinsuranceprod msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=66 alerts=0
+ logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.288798575Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.95552ms
+ level=debug ts=2024-05-29T13:44:14.288793278Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev87.gradle.org, instance=dev87.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.2888027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288430707Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjhuo0a2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.288775169Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjhuo0a2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.288765339Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288413228Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288408153Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288403281Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288374656Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.28861841Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288354877Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288346313Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjhp2i4k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.288632528Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev86.gradle.org, instance=dev86.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.288633374Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288200627Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.288435363Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288193695Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288188983Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjg4s4l9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.288458276Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288146309Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288138385Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288116422Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288107209Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.288289712Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288071107Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.288302708Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.288334816Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.288308034Z caller=remote_instance_store.go:51 user=490806 slug=kampd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288034493Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.2880194Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.288013717Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=490806 slug=kampd instance="datasource_uid=h299JOc4z, ref_id=A" t=2024-05-29T13:44:14.288246Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.288207797Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28798983Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28797753Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28797032Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless" t=2024-05-29T13:44:14.288156571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287962705Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287952727Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.288168366Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287945009Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.288138771Z caller=remote_instance_store.go:51 user=477402 slug=infleqtion msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=363785 slug=moonletmonitor version=3 fingerprint=cb3a84418c043bc2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.287967989Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.287592318s EvaluationString:}]" duration=19.675314ms
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287914298Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.288166541Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287895007Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.28811921Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287837814Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287828963Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287812286Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28780201Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle" t=2024-05-29T13:44:14.287972258Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287761773Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287754028Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287743878Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287734456Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287722425Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines" t=2024-05-29T13:44:14.287887277Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.287895294Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjdjq3pj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287750359Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.287721102Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:14.287747622Z level=debug msg="State manager processing evaluation results" resultCount=7
+ level=debug ts=2024-05-29T13:44:14.287699419Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.287670411Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287675018Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287664657Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287654134Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.287633213Z caller=remote_instance_store.go:51 user=109928 slug=deadhappy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287617756Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28758932Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287581847Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjdjq3pj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287625847Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjdjq3pj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287605527Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjc6zivr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287549207Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=109928 slug=deadhappy version=1 fingerprint=5f06be77f55af994 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.287473718Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[N0:{Var:N Labels: Value:} N1:{Var:N Labels: Value:} N10:{Var:N Labels: Value:} N11:{Var:N Labels: Value:} N12:{Var:N Labels: Value:} N2:{Var:N Labels: Value:} N3:{Var:N Labels: Value:} N4:{Var:N Labels: Value:} N5:{Var:N Labels: Value:} N6:{Var:N Labels: Value:} N7:{Var:N Labels: Value:} N8:{Var:N Labels: Value:} N9:{Var:N Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.287053177s EvaluationString:[ var='N0' metric='NoData' labels={} value=null ], [ var='N1' metric='NoData' labels={} value=null ], [ var='N2' metric='NoData' labels={} value=null ], [ var='N3' metric='NoData' labels={} value=null ], [ var='N4' metric='NoData' labels={} value=null ], [ var='N5' metric='NoData' labels={} value=null ], [ var='N6' metric='NoData' labels={} value=null ], [ var='N7' metric='NoData' labels={} value=null ], [ var='N8' metric='NoData' labels={} value=null ], [ var='N9' metric='NoData' labels={} value=null ], [ var='N10' metric='NoData' labels={} value=null ], [ var='N11' metric='NoData' labels={} value=null ], [ var='N12' metric='NoData' labels={} value=null ]}]" duration=2.485396761s
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28747214Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjc6zivr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287507446Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tjc6zivr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287452666Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev79.gradle.org, instance=dev79.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.287485202Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287367328Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287357125Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287337623Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287294394Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev79.gradle.org, instance=dev79.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.287345744Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287247687Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287237816Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287213848Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287206552Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28719733Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287188084Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287177362Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28709307Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj6nwj8p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287129202Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.287085572Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj6nwj8p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287087132Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj6nwj8p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.287057392Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286965661Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286955798Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.287034993Z caller=remote_instance_store.go:51 user=543604 slug=kingmakers msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj6nwj8p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286996681Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286930949Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj4y04rz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.28694406Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28690519Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj4y04rz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286747428Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286863224Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj4s9vte-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286713598Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286791774Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286780322Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286750297Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286728336Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286696874Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj4s9vte-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286642407Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286673847Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj4s9vte-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286602707Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev76.gradle.org, instance=dev76.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.286680505Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286635912Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286628466Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286614977Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286607475Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.286629426Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28658715Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.286509026Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tj2jqqxp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286467285Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev76.gradle.org, instance=dev76.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.286529963Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28644925Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28642697Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286385617Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286362903Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tixau61x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286373575Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28631207Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286292381Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28628191Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286271508Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.286336323Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286240251Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286199203Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev75.gradle.org, instance=dev75.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.286225038Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286190194Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tiop2inw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.286154352Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28613087Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=333193 slug=peeriq instance="DAG=--, State=failed, Task=--" t=2024-05-29T13:44:14.286124027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286088891Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286052347Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286024933Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.286014308Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285985694Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285953421Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285943762Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285923843Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=333193 slug=peeriq t=2024-05-29T13:44:14.285929703Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285890885Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=333193 slug=peeriq version=53 fingerprint=84d76f8d4b13a475 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.285843843Z level=debug msg="Alert rule evaluated" results="[{Instance:DAG=--, State=failed, Task=-- State:Normal Error: Results:map[] Values:map[Failure Count:{Var:Failure Count Labels:DAG=--, State=failed, Task=-- Value:0xc01004e558} Failure Threshold:{Var:Failure Threshold Labels:DAG=--, State=failed, Task=-- Value:0xc01004e580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.285258937s EvaluationString:[ var='Failure Count' labels={DAG=--, State=failed, Task=--} value=0 ], [ var='Failure Threshold' labels={DAG=--, State=failed, Task=--} value=0 ]}]" duration=175.075046ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tijeh0br-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.28589151Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285849869Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tijeh0br-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285839559Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28582564Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev73.gradle.org, instance=dev73.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.285908788Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285798789Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285765202Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285743429Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285732823Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev72.gradle.org, instance=dev72.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.285800137Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.285693632Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tihaiq1x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285747418Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285663248Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28565126Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285632563Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tih4jtvk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285608497Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285568538Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285556496Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.285590865Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285490189Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tih4jtvk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285568186Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285427077Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28540163Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.285447629Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.285457309Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285360774Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285342572Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tib3mjw9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285415705Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tib3mjw9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285388024Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev71.gradle.org, instance=dev71.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.285378514Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285256185Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=197492 slug=nbi t=2024-05-29T13:44:14.28524151Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.001089ms + logger=ngalert.state.manager user=84360 slug=sib instance= t=2024-05-29T13:44:14.285374369Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tib3mjw9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285319834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28515746Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev70.gradle.org, instance=dev70.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.285263077Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285115185Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285104412Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28507851Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ti6vkbq4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285172572Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=630397 slug=tatin instance= t=2024-05-29T13:44:14.285193655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285029799Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.285017504Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=848777 slug=opsalert instance="__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Mumbai" t=2024-05-29T13:44:14.285138981Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=848777 slug=opsalert instance="__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Mumbai" t=2024-05-29T13:44:14.285122391Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284996785Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=630397 slug=tatin t=2024-05-29T13:44:14.285090229Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.285114597Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ti6vkbq4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285097071Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284986081Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284964474Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ti3r678g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.285055631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Paris" t=2024-05-29T13:44:14.285024302Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284899881Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284890698Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.web03-01.web03-01 A" t=2024-05-29T13:44:14.285041586Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=848777 slug=opsalert t=2024-05-29T13:44:14.28494739Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.scheduler user=848777 slug=opsalert version=55 fingerprint=218fccc34cf7cb3e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.284815179Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Mumbai State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Mumbai Value:0xc010c92e50} C:{Var:C Labels:__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Mumbai Value:0xc010c92ea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.284328806s EvaluationString:[ var='A' labels={__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Mumbai} value=200 ], [ var='C' labels={__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Mumbai} value=0 ]} {Instance:__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Singapore State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Singapore Value:0xc010c92f58} C:{Var:C Labels:__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Singapore Value:0xc010c93008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.284350736s EvaluationString:[ var='A' labels={__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Singapore} value=200 ], [ var='C' labels={__name__=probe_http_status_code, config_version=1715334868818575104, instance=https://sxgitstg.dryice.ai/, job=SX-GIT Staging UI, probe=Singapore} value=0 ]}]" duration=9.611627ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284742439Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-thy7b1yy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284903399Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284702684Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thy7b1yy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284826899Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev69.gradle.org, instance=dev69.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.284867614Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284677672Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284649394Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thy7b1yy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284775498Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284608009Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thsfln8w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284748238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thsfln8w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284736968Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thsfln8w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284649217Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev68.gradle.org, instance=dev68.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.284750505Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thsfln8w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284627937Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284541125Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev68.gradle.org, instance=dev68.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.284742395Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284530521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284477184Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=615392 slug=shinemetrics version=10 fingerprint=85fd0bcca64f18e7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.284543825Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Amsterdam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Amsterdam Value:0xc051128cf8} B:{Var:B Labels:__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Amsterdam Value:0xc051128d68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.283808977s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Amsterdam} value=1 ], [ var='B' labels={__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Amsterdam} value=0 ]} 
{Instance:__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Paris State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Paris Value:0xc051128e28} B:{Var:B Labels:__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Paris Value:0xc051128e88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.283824447s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Paris} value=1 ], [ var='B' labels={__name__=probe_success, config_version=1715008306594085120, instance=https://api.shine.fr/v1/companies/onboarding/liveness_check, job=Liveness Check companies-onboarding-v1, probe=Paris} value=0 ]}]" duration=11.066726ms + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev68.gradle.org, instance=dev68.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.284660754Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.284604597Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284409742Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284395844Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.284489885Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.284460834Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284363926Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thsew9m2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284572416Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284343585Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.284407722Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thsew9m2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284543546Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev67.gradle.org, instance=dev67.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.28454437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev67.gradle.org, instance=dev67.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.284531684Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.284481917Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284291566Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.survey03-04.survey03-04 A" t=2024-05-29T13:44:14.28450814Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284231904Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.284380442Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.survey03-04.survey03-04 A" t=2024-05-29T13:44:14.284496774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.survey03-03.survey03-03 A" t=2024-05-29T13:44:14.284453752Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.survey03-02.survey03-02 A" t=2024-05-29T13:44:14.284419779Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284147139Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284123174Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thir10tq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284355874Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-thir10tq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284325634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.survey03-01.survey03-01 A" t=2024-05-29T13:44:14.28436734Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284046037Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.284032714Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.supapi03-03.supapi03-03 A" t=2024-05-29T13:44:14.28431231Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283959594Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thg5su4i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284219612Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thg5su4i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.284209332Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283904463Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283841188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283801471Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283793396Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28377087Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=310637 slug=notino t=2024-05-29T13:44:14.284085772Z level=debug msg="Saving alert states" count=2 
max_state_save_concurrency=1 + logger=ngalert.state.manager user=310637 slug=notino instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.284066315Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283762301Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=310637 slug=notino instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.284030347Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283728452Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.284007059Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283666804Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev66.gradle.org, instance=dev66.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.284025244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283656622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283639059Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.283972437Z caller=remote_instance_store.go:51 user=830631 slug=api3 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283562397Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283539006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283517137Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283497893Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283473173Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.postfix.ip-10-76-36-84 A" t=2024-05-29T13:44:14.283869842Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thbzq8lz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283792548Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283455132Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev65.gradle.org, instance=dev65.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.283813912Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283441733Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-thbzq8lz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283741028Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.port03-01.port03-01 A" t=2024-05-29T13:44:14.283776089Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28342213Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283411987Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.283711454Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-th71kdgh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283690057Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283371779Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev64.gradle.org, instance=dev64.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.2836823Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283333211Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-th71kdgh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283659287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-th71kdgh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283596376Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283260632Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283241527Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283211813Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283192594Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.28349073Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283113082Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283092703Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283081662Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283071941Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-th5d8ss7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283389274Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev63.gradle.org, instance=dev63.gradle.org, 
job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.283438973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.283381155Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.592567ms + logger=ngalert.state.manager.persist user=45099 slug=iyuyue t=2024-05-29T13:44:14.283034343Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.464259ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283033818Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.panapi03-02.panapi03-02 A" t=2024-05-29T13:44:14.28341928Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.283008456Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.panapi03-01.panapi03-01 A" t=2024-05-29T13:44:14.283388917Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282924703Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.newrelic-private-location.ip-10-76-43-51 A" t=2024-05-29T13:44:14.283333559Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.newrelic-private-location.ip-10-76-43-51 A" t=2024-05-29T13:44:14.283301547Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282860416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-th1muv8f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283185742Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev62.gradle.org, instance=dev62.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.283225756Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev62.gradle.org, instance=dev62.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.283211671Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:14.28317067Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.979825ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, 
ref_id=A" t=2024-05-29T13:44:14.282775833Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282747981Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282695911Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgzrjsty-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.283065901Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgzrjsty-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.28302384Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282605173Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.283022258Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=444728 slug=stgnextgen t=2024-05-29T13:44:14.282900902Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.33034ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282554054Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.282963318Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282523226Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgpq43h2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282915619Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282482875Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle 
instance="device=tmpfs, fstype=tmpfs, host=dev61.gradle.org, instance=dev61.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.282914399Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282368053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282296078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev61.gradle.org, instance=dev61.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.282753727Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282259706Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282221287Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgjl1h8d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282662446Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282165121Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282086335Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282076681Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgjl1h8d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282541675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282053383Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282028222Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + 
logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.282008398Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tggbcroi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282456104Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281955027Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28194457Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.282446921Z caller=remote_instance_store.go:51 user=357638 slug=usepower msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281936861Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28192613Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev58.gradle.org, instance=dev58.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.282450001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281874275Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tggbcroi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282413104Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:14.282438432Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tggbcroi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282385924Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.lrid02-01.lrid02-01 A" t=2024-05-29T13:44:14.282414456Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tggbcroi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282347083Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28180932Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=306439 slug=caiprod t=2024-05-29T13:44:14.28231875Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.786373ms + logger=ngalert.state.manager user=173374 slug=felmo t=2024-05-29T13:44:14.282361412Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281777485Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281760643Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281750193Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.logstash.ip-10-76-42-240 A" t=2024-05-29T13:44:14.28234895Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281739911Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281718989Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgfsl6ae-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282230192Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.logstash.ip-10-76-36-230 A" t=2024-05-29T13:44:14.282304226Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.logstash.ip-10-76-35-74 A" t=2024-05-29T13:44:14.282270536Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.logstash.ip-10-76-35-74 A" t=2024-05-29T13:44:14.282260563Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgfsl6ae-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.282188092Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281652026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28164688Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.281774502Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281630942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgf9n5iv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.28207983Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.282126071Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgf9n5iv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.281954819Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281540143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgdwgdy0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.281919939Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281529495Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281506965Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgdwgdy0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.281860898Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.ints03-03.ints03-03 A" t=2024-05-29T13:44:14.282113063Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.ints03-03.ints03-03 A" t=2024-05-29T13:44:14.282101745Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28143695Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tgdwgdy0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.281823478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev40.gradle.org, instance=dev40.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.282046326Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tg5r6ib2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.281731457Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281391181Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev40.gradle.org, instance=dev40.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.282031435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28138017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.dpi03-03.dpi03-03 A" t=2024-05-29T13:44:14.282024555Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281358813Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281347323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281327085Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.dpi03-02.dpi03-02 A" t=2024-05-29T13:44:14.281974348Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281269898Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281247569Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281227749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281191531Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.carbon-relay-ng.ip-10-76-43-30 A" t=2024-05-29T13:44:14.281880081Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281157625Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281134035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281124971Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev39.gradle.org, instance=dev39.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.281815772Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28110265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28109445Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28107264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281054401Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281040147Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.281031695Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280968517Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280939772Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280928123Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tftp02n1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.281560045Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280880358Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280861424Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280847042Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280840012Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev38.gradle.org, instance=dev38.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.281539947Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280812199Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280780486Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.2814749Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280751489Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev37.gradle.org, instance=dev37.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.281428745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=JtJ0ju04z, ref_id=A" t=2024-05-29T13:44:14.281433224Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280671361Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280660461Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280647242Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=JtJ0ju04z, ref_id=A" t=2024-05-29T13:44:14.281409448Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev37.gradle.org, instance=dev37.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.281415941Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28061989Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280609418Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.scheduler user=430961 slug=solifi version=5 fingerprint=23c7fc5fcec0b99a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.281300026Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=JtJ0ju04z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.281011963s EvaluationString:}]" duration=36.410683ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280587005Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280564563Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28046486Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280453486Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280446391Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.28040724Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280370084Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev36.gradle.org, instance=dev36.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.281149514Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280325584Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev36.gradle.org, instance=dev36.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.281124198Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.280932029Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280241697Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280185989Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280145975Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280118056Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280089962Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280081397Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280058221Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev36.gradle.org, instance=dev36.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.280974931Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.280015223Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279989439Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279975761Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279913815Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tfrn2v6y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.280764597Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev35.gradle.org, instance=dev35.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.280823467Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279855715Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279844529Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev35.gradle.org, instance=dev35.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.280698566Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.280643838Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279716939Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:14.280545496Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev34.gradle.org, instance=dev34.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.280576746Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279625103Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279578631Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279518663Z level=debug 
msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279507376Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279455972Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279444422Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279433566Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf7xvwky-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.280363353Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=687021 slug=heviai t=2024-05-29T13:44:14.280291921Z level=debug msg="Saving alert states done" count=20 max_state_save_concurrency=1 duration=313.794479ms +level=debug ts=2024-05-29T13:44:14.280326185Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf43vmwi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.280227112Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf43vmwi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.280183981Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf43vmwi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.280154691Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.280107577Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.44625ms +level=debug 
ts=2024-05-29T13:44:14.280076661Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev33.gradle.org, instance=dev33.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.280093726Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf3rp8jt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.28007621Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.280014063Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279420965Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf3rp8jt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.280001099Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279934562Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279414137Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279929062Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279391721Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev32.gradle.org, instance=dev32.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.27996655Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev32.gradle.org, instance=dev32.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.279950933Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279355184Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279335526Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279323591Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf1o0gh5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279913688Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tf1o0gh5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279883158Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279275879Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279259884Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27925242Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:14.279886192Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev32.gradle.org, instance=dev32.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.279801176Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-teww8lsg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279786517Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.279824387Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279161061Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279152019Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279139093Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279130202Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=3f5f841b3b92f474 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.279667409Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[A0:{Var:A Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.279425692s EvaluationString:[ var='A0' metric='NoData' labels={} value=null ]}]" duration=133.949552ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-teww8lsg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279735026Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.279108941Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-teww8lsg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279667156Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27901437Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278998106Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278982486Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tetr0zzi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279597825Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27892221Z level=debug msg="Execution no 
data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tetr0zzi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279555925Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tetr0zzi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279526914Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278879267Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278801239Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tetr0zzi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279458174Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.279469679Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ten2unnw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279419313Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278776297Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ten2unnw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279390713Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, 
ref_id=A" t=2024-05-29T13:44:14.278662483Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278650816Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278634439Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ten2unnw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279328662Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278587968Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278579256Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ten2unnw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279258812Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tem19k4s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279208611Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tem19k4s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279157811Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278503382Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tem19k4s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.27913024Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278469649Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tem19k4s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.27905918Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278427706Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278417488Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tefbvygf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.279022639Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278376583Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tefbvygf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278996649Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278340349Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.278984927Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278268333Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278258226Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-tefbvygf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278955498Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278201811Z level=debug msg="Keeping state" state=Normal +level=info ts=2024-05-29T13:44:14.278834754Z caller=remote_alert_sender.go:94 user=705083 slug=mediakindsaas host=mediakindsaas-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.49.104.169:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdlc07p7v7ny8b alerts=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278179423Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:14.278816111Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278146633Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:14.278796452Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278124399Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:14.278808493Z caller=remote_instance_store.go:51 user=150145 slug=pleasant msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.278055616Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:14.27874769Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=PROD - Memcached Services status" +logger=ngalert.state.manager.persist user=705083 slug=mediakindsaas t=2024-05-29T13:44:14.278706446Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.857839ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277963465Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277952981Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277946048Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277883426Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277839458Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tea02eum-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278646965Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev307.gradle.org, instance=dev307.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.278646342Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277811038Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27771625Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277615406Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:14.277648406Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=206107 slug=hydrolix version=6 fingerprint=3c8e20a66704289b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.277493937Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bfa2c0a5-7fee-456c-85b0-a8a88dea7b0b, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.277211995s EvaluationString:}]" duration=241.319239ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-te73mk53-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278546674Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277583223Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277564675Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277510936Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277502226Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27749008Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277456636Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277426806Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277406568Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277395507Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev306.gradle.org, instance=dev306.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.278385121Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-te73mk53-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278357872Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-te3i32du-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278304302Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277313442Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-te3i32du-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278254661Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277281529Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277246712Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-te3i32du-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278226401Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277238852Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277186943Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277172733Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277150579Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-te3i32du-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.27815446Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev306.gradle.org, instance=dev306.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.278189347Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277089813Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.277055451Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdxxcsr3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.278055589Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276983907Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276973196Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276954006Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276940481Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276931195Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276873768Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276865861Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:14.277912009Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276834132Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276826453Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276798816Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276791181Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276762216Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdtg8log-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277802737Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276729586Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276699919Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdtg8log-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277723526Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.277771175Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276662908Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdqmzr3k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277683865Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.277673995Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276624092Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276616725Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276572486Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276563175Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276552866Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdqmzr3k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277588054Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276501207Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdqmzr3k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277546484Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276475279Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276432083Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276412249Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276395953Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276382921Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276334795Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdl7hlc0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277476873Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276292128Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdl7hlc0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277448603Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276245069Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=311292 slug=rampeu t=2024-05-29T13:44:14.27738808Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27621413Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdl7hlc0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277380982Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276204253Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276196995Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276183396Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276155319Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdl7hlc0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277306191Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.277327434Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276132835Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276120099Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev302.gradle.org, instance=dev302.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.277295477Z 
level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276093849Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276086198Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.276065499Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=311292 slug=rampeu instance="__name__=rabbitmq_channel_messages_unroutable_returned_total, cluster=prod, container=rabbitmq, endpoint=prometheus, environment=prod, instance=10.100.14.41:15692, job=rabbitmq, namespace=prod, pod=rabbitmq-server-1, service=rabbitmq" t=2024-05-29T13:44:14.27727575Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=311292 slug=rampeu instance="__name__=rabbitmq_channel_messages_unroutable_returned_total, cluster=prod, container=rabbitmq, endpoint=prometheus, environment=prod, instance=10.100.14.41:15692, job=rabbitmq, namespace=prod, pod=rabbitmq-server-1, service=rabbitmq" t=2024-05-29T13:44:14.27726377Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdix93sl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.277236761Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tdix93sl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.27720789Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=311292 slug=rampeu instance="__name__=rabbitmq_channel_messages_unroutable_returned_total, cluster=prod, container=rabbitmq, endpoint=prometheus, environment=prod, instance=10.100.13.65:15692, job=rabbitmq, namespace=prod, pod=rabbitmq-server-0, service=rabbitmq" t=2024-05-29T13:44:14.27717307Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275964638Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev302.gradle.org, instance=dev302.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.277122157Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275934018Z level=debug 
msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev302.gradle.org, instance=dev302.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.277109135Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.277078105Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275894828Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275820369Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275809879Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275750152Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.276944036Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-td8ik0ii-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276955488Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-td5ipik1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276916217Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275693052Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275618105Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-td5ipik1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276840807Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.275586348Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275550603Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275521995Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275490165Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27545972Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275438329Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-td4aadse-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276686395Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev300.gradle.org, instance=dev300.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.276660443Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.276656401Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275383367Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275353887Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:14.276602849Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275327612Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.apis03-02.apis03-02 A" t=2024-05-29T13:44:14.27654554Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus" t=2024-05-29T13:44:14.276571351Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.274691137Z caller=remote_instance_store.go:51 user=410504 
slug=nordictrustee msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tcxpkl6l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276522763Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275170109Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275162436Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275139416Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27505496Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275044402Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.275031375Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager.persist user=686395 slug=containerfoundation t=2024-05-29T13:44:14.276373517Z level=debug msg="Saving alert states done" count=31 max_state_save_concurrency=1 duration=399.177394ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274990364Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev276.gradle.org, instance=dev276.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.276396069Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274881265Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274847675Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tcxgcr2s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276299671Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=206107 slug=hydrolix 
t=2024-05-29T13:44:14.274774327Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274830612Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus" t=2024-05-29T13:44:14.276312156Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.276273895Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tcxgcr2s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276261431Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.apis03-01.apis03-01 A" t=2024-05-29T13:44:14.276266344Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274727854Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274690708Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev274.gradle.org, instance=dev274.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.276229298Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274661505Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274645549Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274636592Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274630345Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274619841Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=prd-bf.apis03-01.apis03-01 A" t=2024-05-29T13:44:14.276163355Z level=debug msg="Setting next state" 
handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274607253Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274598438Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274565944Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.276107898Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev273.gradle.org, instance=dev273.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.276111215Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274476429Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274446818Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274437919Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tcwz8yk7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.276019508Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274398Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274327704Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274291158Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tcwz8yk7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.275969448Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274217316Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager.persist user=713300 slug=tpcnanonprod t=2024-05-29T13:44:14.275865148Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.75294ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27408067Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274068355Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tcug9w5v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.275900927Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.274003405Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273988111Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273974977Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273954459Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273944334Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273938023Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273929479Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273922931Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273917167Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273873095Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.273844724Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273834019Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev271.gradle.org, instance=dev271.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.275804374Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.275755162Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273786679Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tctl2zit-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.275714945Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273748902Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273741504Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tctl2zit-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.275694545Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.275698661Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273719543Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273708314Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273701249Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273693626Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273665676Z level=debug msg="Execution 
no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tctl2zit-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.275665994Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27365888Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.27364687Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273635922Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273620132Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273610006Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273603857Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273589314Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-tckgy56f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.275540203Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273567992Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273559419Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273553374Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273542445Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.273536692Z level=debug msg="Keeping state" 
t=2024-05-29T13:44:14.269657339Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269645773Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t9hsdmib-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.271052147Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269564843Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t9hsdmib-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.271010307Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269555407Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269539497Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269531765Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269524393Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269484121Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t9hsdmib-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270945186Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269456372Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev236.gradle.org, 
instance=dev236.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.270932616Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269446211Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t9amkg8d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270892955Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.27089566Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26939717Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26938624Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=697627 slug=haqq instance="instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network" t=2024-05-29T13:44:14.270882788Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.270792628Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269320671Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269291069Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269271007Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26922609Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.270636827Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.270584752Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.270654694Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269202988Z level=debug msg="Keeping 
state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269194998Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269139005Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.27060859Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t9a7fsb3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270614012Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t9a7fsb3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270586162Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.270596657Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269093159Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=697627 slug=haqq version=1 fingerprint=ef2aacdde91aa541 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.269435503Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network Value:0xc07fd1f080} B:{Var:B Labels:instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network Value:0xc07fd1f0d0} C:{Var:C Labels:instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network Value:0xc07fd1f110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183488243s EvaluationString:[ var='A' labels={instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network} 
value=3 ], [ var='B' labels={instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://backend.wallet.testedge2.haqq.network, job=https://backend.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://backend.wallet.testedge2.haqq.network} value=0 ]} {Instance:instance=https://config.safe.testedge2.haqq.network, job=https://config.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://config.safe.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://config.safe.testedge2.haqq.network, job=https://config.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://config.safe.testedge2.haqq.network Value:0xc07fd1f1d0} B:{Var:B Labels:instance=https://config.safe.testedge2.haqq.network, job=https://config.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://config.safe.testedge2.haqq.network Value:0xc07fd1f210} C:{Var:C Labels:instance=https://config.safe.testedge2.haqq.network, job=https://config.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://config.safe.testedge2.haqq.network Value:0xc07fd1f250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183511235s EvaluationString:[ var='A' labels={instance=https://config.safe.testedge2.haqq.network, job=https://config.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://config.safe.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://config.safe.testedge2.haqq.network, job=https://config.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://config.safe.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://config.safe.testedge2.haqq.network, job=https://config.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://config.safe.testedge2.haqq.network} value=0 ]} {Instance:instance=https://ecosystem-rates.testedge2.haqq.network, job=https://ecosystem-rates.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://ecosystem-rates.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://ecosystem-rates.testedge2.haqq.network, job=https://ecosystem-rates.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://ecosystem-rates.testedge2.haqq.network Value:0xc07fd1f2f0} B:{Var:B Labels:instance=https://ecosystem-rates.testedge2.haqq.network, job=https://ecosystem-rates.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://ecosystem-rates.testedge2.haqq.network Value:0xc07fd1f330} C:{Var:C Labels:instance=https://ecosystem-rates.testedge2.haqq.network, job=https://ecosystem-rates.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://ecosystem-rates.testedge2.haqq.network Value:0xc07fd1f398}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183526239s EvaluationString:[ var='A' labels={instance=https://ecosystem-rates.testedge2.haqq.network, job=https://ecosystem-rates.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://ecosystem-rates.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://ecosystem-rates.testedge2.haqq.network, job=https://ecosystem-rates.testedge2.haqq.network@testedge2, 
label_check=testedge2, label_site=https://ecosystem-rates.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://ecosystem-rates.testedge2.haqq.network, job=https://ecosystem-rates.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://ecosystem-rates.testedge2.haqq.network} value=0 ]} {Instance:instance=https://explorer.testedge2.haqq.network/api/v1/health, job=https://explorer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://explorer.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://explorer.testedge2.haqq.network/api/v1/health, job=https://explorer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://explorer.testedge2.haqq.network Value:0xc07fd1f498} B:{Var:B Labels:instance=https://explorer.testedge2.haqq.network/api/v1/health, job=https://explorer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://explorer.testedge2.haqq.network Value:0xc07fd1f4e0} C:{Var:C Labels:instance=https://explorer.testedge2.haqq.network/api/v1/health, job=https://explorer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://explorer.testedge2.haqq.network Value:0xc07fd1f528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183537005s EvaluationString:[ var='A' labels={instance=https://explorer.testedge2.haqq.network/api/v1/health, job=https://explorer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://explorer.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://explorer.testedge2.haqq.network/api/v1/health, job=https://explorer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://explorer.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://explorer.testedge2.haqq.network/api/v1/health, job=https://explorer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://explorer.testedge2.haqq.network} value=0 ]} {Instance:instance=https://gateway.safe.testedge2.haqq.network, job=https://gateway.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://gateway.safe.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://gateway.safe.testedge2.haqq.network, job=https://gateway.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://gateway.safe.testedge2.haqq.network Value:0xc07fd1f5c0} B:{Var:B Labels:instance=https://gateway.safe.testedge2.haqq.network, job=https://gateway.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://gateway.safe.testedge2.haqq.network Value:0xc07fd1f608} C:{Var:C Labels:instance=https://gateway.safe.testedge2.haqq.network, job=https://gateway.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://gateway.safe.testedge2.haqq.network Value:0xc07fd1f660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.18354866s EvaluationString:[ var='A' labels={instance=https://gateway.safe.testedge2.haqq.network, job=https://gateway.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://gateway.safe.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://gateway.safe.testedge2.haqq.network, job=https://gateway.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://gateway.safe.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://gateway.safe.testedge2.haqq.network, 
job=https://gateway.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://gateway.safe.testedge2.haqq.network} value=0 ]} {Instance:instance=https://generator-shares.social.testedge2.haqq.network, job=https://generator-shares.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://generator-shares.social.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://generator-shares.social.testedge2.haqq.network, job=https://generator-shares.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://generator-shares.social.testedge2.haqq.network Value:0xc07fd1f738} B:{Var:B Labels:instance=https://generator-shares.social.testedge2.haqq.network, job=https://generator-shares.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://generator-shares.social.testedge2.haqq.network Value:0xc07fd1f780} C:{Var:C Labels:instance=https://generator-shares.social.testedge2.haqq.network, job=https://generator-shares.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://generator-shares.social.testedge2.haqq.network Value:0xc07fd1f6f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183559339s EvaluationString:[ var='A' labels={instance=https://generator-shares.social.testedge2.haqq.network, job=https://generator-shares.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://generator-shares.social.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://generator-shares.social.testedge2.haqq.network, job=https://generator-shares.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://generator-shares.social.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://generator-shares.social.testedge2.haqq.network, job=https://generator-shares.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://generator-shares.social.testedge2.haqq.network} value=0 ]} {Instance:instance=https://grafana.haqqgen.dev/testedge2/loki/loki/api/v1/labels, job=Grafana Self-Hosted Loki check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/loki State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://grafana.haqqgen.dev/testedge2/loki/loki/api/v1/labels, job=Grafana Self-Hosted Loki check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/loki Value:0xc07fd1f9e8} B:{Var:B Labels:instance=https://grafana.haqqgen.dev/testedge2/loki/loki/api/v1/labels, job=Grafana Self-Hosted Loki check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/loki Value:0xc07fd1fa40} C:{Var:C Labels:instance=https://grafana.haqqgen.dev/testedge2/loki/loki/api/v1/labels, job=Grafana Self-Hosted Loki check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/loki Value:0xc07fd1f8c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183574177s EvaluationString:[ var='A' labels={instance=https://grafana.haqqgen.dev/testedge2/loki/loki/api/v1/labels, job=Grafana Self-Hosted Loki check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/loki} value=3 ], [ var='B' labels={instance=https://grafana.haqqgen.dev/testedge2/loki/loki/api/v1/labels, job=Grafana Self-Hosted Loki check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/loki} value=3 ], [ 
var='C' labels={instance=https://grafana.haqqgen.dev/testedge2/loki/loki/api/v1/labels, job=Grafana Self-Hosted Loki check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/loki} value=0 ]} {Instance:instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus Value:0xc07fd1fae0} B:{Var:B Labels:instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus Value:0xc07fd1fb20} C:{Var:C Labels:instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus Value:0xc07fd1fb68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183584018s EvaluationString:[ var='A' labels={instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus} value=3 ], [ var='B' labels={instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus} value=3 ], [ var='C' labels={instance=https://grafana.haqqgen.dev/testedge2/prometheus, job=Grafana Self-Hosted Prometheus check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/prometheus} value=0 ]} {Instance:instance=https://grafana.haqqgen.dev/testedge2/tempo/api/search, job=Grafana Self-Hosted Tempo check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/tempo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://grafana.haqqgen.dev/testedge2/tempo/api/search, job=Grafana Self-Hosted Tempo check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/tempo Value:0xc07fd1fc00} B:{Var:B Labels:instance=https://grafana.haqqgen.dev/testedge2/tempo/api/search, job=Grafana Self-Hosted Tempo check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/tempo Value:0xc07fd1fc40} C:{Var:C Labels:instance=https://grafana.haqqgen.dev/testedge2/tempo/api/search, job=Grafana Self-Hosted Tempo check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/tempo Value:0xc07fd1fc98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183611408s EvaluationString:[ var='A' labels={instance=https://grafana.haqqgen.dev/testedge2/tempo/api/search, job=Grafana Self-Hosted Tempo check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/tempo} value=3 ], [ var='B' labels={instance=https://grafana.haqqgen.dev/testedge2/tempo/api/search, job=Grafana Self-Hosted Tempo check testedge2, label_check=testedge2, label_site=https://grafana.haqqgen.dev/testedge2/tempo} value=3 ], [ var='C' labels={instance=https://grafana.haqqgen.dev/testedge2/tempo/api/search, job=Grafana Self-Hosted Tempo check testedge2, label_check=testedge2, 
label_site=https://grafana.haqqgen.dev/testedge2/tempo} value=0 ]} {Instance:instance=https://indexer.safe.testedge2.haqq.network, job=https://indexer.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://indexer.safe.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://indexer.safe.testedge2.haqq.network, job=https://indexer.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://indexer.safe.testedge2.haqq.network Value:0xc07fd1fdf8} B:{Var:B Labels:instance=https://indexer.safe.testedge2.haqq.network, job=https://indexer.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://indexer.safe.testedge2.haqq.network Value:0xc07fd1fe48} C:{Var:C Labels:instance=https://indexer.safe.testedge2.haqq.network, job=https://indexer.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://indexer.safe.testedge2.haqq.network Value:0xc07fd1fe90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183617428s EvaluationString:[ var='A' labels={instance=https://indexer.safe.testedge2.haqq.network, job=https://indexer.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://indexer.safe.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://indexer.safe.testedge2.haqq.network, job=https://indexer.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://indexer.safe.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://indexer.safe.testedge2.haqq.network, job=https://indexer.safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://indexer.safe.testedge2.haqq.network} value=0 ]} {Instance:instance=https://jsonrpc.indexer.testedge2.haqq.network, job=https://jsonrpc.indexer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://jsonrpc.indexer.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://jsonrpc.indexer.testedge2.haqq.network, job=https://jsonrpc.indexer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://jsonrpc.indexer.testedge2.haqq.network Value:0xc07fd1ff78} B:{Var:B Labels:instance=https://jsonrpc.indexer.testedge2.haqq.network, job=https://jsonrpc.indexer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://jsonrpc.indexer.testedge2.haqq.network Value:0xc0130be030} C:{Var:C Labels:instance=https://jsonrpc.indexer.testedge2.haqq.network, job=https://jsonrpc.indexer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://jsonrpc.indexer.testedge2.haqq.network Value:0xc07fd1ff20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183625647s EvaluationString:[ var='A' labels={instance=https://jsonrpc.indexer.testedge2.haqq.network, job=https://jsonrpc.indexer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://jsonrpc.indexer.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://jsonrpc.indexer.testedge2.haqq.network, job=https://jsonrpc.indexer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://jsonrpc.indexer.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://jsonrpc.indexer.testedge2.haqq.network, job=https://jsonrpc.indexer.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://jsonrpc.indexer.testedge2.haqq.network} value=0 ]} 
{Instance:instance=https://metadata.social.testedge2.haqq.network, job=https://metadata.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://metadata.social.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://metadata.social.testedge2.haqq.network, job=https://metadata.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://metadata.social.testedge2.haqq.network Value:0xc0130be0b0} B:{Var:B Labels:instance=https://metadata.social.testedge2.haqq.network, job=https://metadata.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://metadata.social.testedge2.haqq.network Value:0xc0130be0f8} C:{Var:C Labels:instance=https://metadata.social.testedge2.haqq.network, job=https://metadata.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://metadata.social.testedge2.haqq.network Value:0xc0130be140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183637199s EvaluationString:[ var='A' labels={instance=https://metadata.social.testedge2.haqq.network, job=https://metadata.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://metadata.social.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://metadata.social.testedge2.haqq.network, job=https://metadata.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://metadata.social.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://metadata.social.testedge2.haqq.network, job=https://metadata.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://metadata.social.testedge2.haqq.network} value=0 ]} {Instance:instance=https://notifications.sender.testedge2.haqq.network, job=https://notifications.sender.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://notifications.sender.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://notifications.sender.testedge2.haqq.network, job=https://notifications.sender.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://notifications.sender.testedge2.haqq.network Value:0xc0130be1b8} B:{Var:B Labels:instance=https://notifications.sender.testedge2.haqq.network, job=https://notifications.sender.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://notifications.sender.testedge2.haqq.network Value:0xc0130be200} C:{Var:C Labels:instance=https://notifications.sender.testedge2.haqq.network, job=https://notifications.sender.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://notifications.sender.testedge2.haqq.network Value:0xc0130be240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183645687s EvaluationString:[ var='A' labels={instance=https://notifications.sender.testedge2.haqq.network, job=https://notifications.sender.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://notifications.sender.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://notifications.sender.testedge2.haqq.network, job=https://notifications.sender.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://notifications.sender.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://notifications.sender.testedge2.haqq.network, job=https://notifications.sender.testedge2.haqq.network@testedge2, label_check=testedge2, 
label_site=https://notifications.sender.testedge2.haqq.network} value=0 ]} {Instance:instance=https://rpc-ws.eth.testedge2.haqq.network, job=https://rpc-ws.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc-ws.eth.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://rpc-ws.eth.testedge2.haqq.network, job=https://rpc-ws.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc-ws.eth.testedge2.haqq.network Value:0xc0130be2c8} B:{Var:B Labels:instance=https://rpc-ws.eth.testedge2.haqq.network, job=https://rpc-ws.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc-ws.eth.testedge2.haqq.network Value:0xc0130be310} C:{Var:C Labels:instance=https://rpc-ws.eth.testedge2.haqq.network, job=https://rpc-ws.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc-ws.eth.testedge2.haqq.network Value:0xc0130be358}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.18365491s EvaluationString:[ var='A' labels={instance=https://rpc-ws.eth.testedge2.haqq.network, job=https://rpc-ws.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc-ws.eth.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://rpc-ws.eth.testedge2.haqq.network, job=https://rpc-ws.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc-ws.eth.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://rpc-ws.eth.testedge2.haqq.network, job=https://rpc-ws.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc-ws.eth.testedge2.haqq.network} value=0 ]} {Instance:instance=https://rpc.eth.testedge2.haqq.network, job=https://rpc.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.eth.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://rpc.eth.testedge2.haqq.network, job=https://rpc.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.eth.testedge2.haqq.network Value:0xc0130be3e0} B:{Var:B Labels:instance=https://rpc.eth.testedge2.haqq.network, job=https://rpc.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.eth.testedge2.haqq.network Value:0xc0130be420} C:{Var:C Labels:instance=https://rpc.eth.testedge2.haqq.network, job=https://rpc.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.eth.testedge2.haqq.network Value:0xc0130be460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.1836633s EvaluationString:[ var='A' labels={instance=https://rpc.eth.testedge2.haqq.network, job=https://rpc.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.eth.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://rpc.eth.testedge2.haqq.network, job=https://rpc.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.eth.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://rpc.eth.testedge2.haqq.network, job=https://rpc.eth.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.eth.testedge2.haqq.network} value=0 ]} {Instance:instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network Value:0xc0130be4e0} B:{Var:B Labels:instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network Value:0xc0130be520} C:{Var:C Labels:instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network Value:0xc0130be560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.18367285s EvaluationString:[ var='A' labels={instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://rpc.tm.testedge2.haqq.network, job=https://rpc.tm.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://rpc.tm.testedge2.haqq.network} value=0 ]} {Instance:instance=https://safe.testedge2.haqq.network, job=https://safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://safe.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://safe.testedge2.haqq.network, job=https://safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://safe.testedge2.haqq.network Value:0xc0130be628} B:{Var:B Labels:instance=https://safe.testedge2.haqq.network, job=https://safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://safe.testedge2.haqq.network Value:0xc0130be670} C:{Var:C Labels:instance=https://safe.testedge2.haqq.network, job=https://safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://safe.testedge2.haqq.network Value:0xc0130be5e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183684193s EvaluationString:[ var='A' labels={instance=https://safe.testedge2.haqq.network, job=https://safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://safe.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://safe.testedge2.haqq.network, job=https://safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://safe.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://safe.testedge2.haqq.network, job=https://safe.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://safe.testedge2.haqq.network} value=0 ]} {Instance:instance=https://social-share-1.social.testedge2.haqq.network, job=https://social-share-1.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-1.social.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://social-share-1.social.testedge2.haqq.network, job=https://social-share-1.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-1.social.testedge2.haqq.network Value:0xc0130be770} B:{Var:B Labels:instance=https://social-share-1.social.testedge2.haqq.network, job=https://social-share-1.social.testedge2.haqq.network@testedge2, label_check=testedge2, 
label_site=https://social-share-1.social.testedge2.haqq.network Value:0xc0130be6e8} C:{Var:C Labels:instance=https://social-share-1.social.testedge2.haqq.network, job=https://social-share-1.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-1.social.testedge2.haqq.network Value:0xc0130be730}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.18369437s EvaluationString:[ var='A' labels={instance=https://social-share-1.social.testedge2.haqq.network, job=https://social-share-1.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-1.social.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://social-share-1.social.testedge2.haqq.network, job=https://social-share-1.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-1.social.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://social-share-1.social.testedge2.haqq.network, job=https://social-share-1.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-1.social.testedge2.haqq.network} value=0 ]} {Instance:instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network Value:0xc0130be870} B:{Var:B Labels:instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network Value:0xc0130be7e8} C:{Var:C Labels:instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network Value:0xc0130be830}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183702551s EvaluationString:[ var='A' labels={instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://social-share-2.social.testedge2.haqq.network, job=https://social-share-2.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-2.social.testedge2.haqq.network} value=0 ]} {Instance:instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network 
Value:0xc0130be950} B:{Var:B Labels:instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network Value:0xc0130be990} C:{Var:C Labels:instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network Value:0xc0130be910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183712139s EvaluationString:[ var='A' labels={instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://social-share-3.social.testedge2.haqq.network, job=https://social-share-3.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-3.social.testedge2.haqq.network} value=0 ]} {Instance:instance=https://social-share-4.social.testedge2.haqq.network, job=https://social-share-4.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-4.social.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://social-share-4.social.testedge2.haqq.network, job=https://social-share-4.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-4.social.testedge2.haqq.network Value:0xc0130bea68} B:{Var:B Labels:instance=https://social-share-4.social.testedge2.haqq.network, job=https://social-share-4.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-4.social.testedge2.haqq.network Value:0xc0130beab0} C:{Var:C Labels:instance=https://social-share-4.social.testedge2.haqq.network, job=https://social-share-4.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-4.social.testedge2.haqq.network Value:0xc0130bea20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183720987s EvaluationString:[ var='A' labels={instance=https://social-share-4.social.testedge2.haqq.network, job=https://social-share-4.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-4.social.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://social-share-4.social.testedge2.haqq.network, job=https://social-share-4.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-4.social.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://social-share-4.social.testedge2.haqq.network, job=https://social-share-4.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-4.social.testedge2.haqq.network} value=0 ]} {Instance:instance=https://social-share-5.social.testedge2.haqq.network, job=https://social-share-5.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-5.social.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:instance=https://social-share-5.social.testedge2.haqq.network, job=https://social-share-5.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-5.social.testedge2.haqq.network Value:0xc0130bebb0} B:{Var:B Labels:instance=https://social-share-5.social.testedge2.haqq.network, job=https://social-share-5.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-5.social.testedge2.haqq.network Value:0xc0130beb30} C:{Var:C Labels:instance=https://social-share-5.social.testedge2.haqq.network, job=https://social-share-5.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-5.social.testedge2.haqq.network Value:0xc0130beb70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183732632s EvaluationString:[ var='A' labels={instance=https://social-share-5.social.testedge2.haqq.network, job=https://social-share-5.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-5.social.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://social-share-5.social.testedge2.haqq.network, job=https://social-share-5.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-5.social.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://social-share-5.social.testedge2.haqq.network, job=https://social-share-5.social.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://social-share-5.social.testedge2.haqq.network} value=0 ]} {Instance:instance=https://stats.explorer.testedge2.haqq.network/health, job=https://stats.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://stats.explorer.testedge2.haqq.network/health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://stats.explorer.testedge2.haqq.network/health, job=https://stats.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://stats.explorer.testedge2.haqq.network/health Value:0xc0130becb0} B:{Var:B Labels:instance=https://stats.explorer.testedge2.haqq.network/health, job=https://stats.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://stats.explorer.testedge2.haqq.network/health Value:0xc0130bec30} C:{Var:C Labels:instance=https://stats.explorer.testedge2.haqq.network/health, job=https://stats.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://stats.explorer.testedge2.haqq.network/health Value:0xc0130bec70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183741498s EvaluationString:[ var='A' labels={instance=https://stats.explorer.testedge2.haqq.network/health, job=https://stats.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://stats.explorer.testedge2.haqq.network/health} value=3 ], [ var='B' labels={instance=https://stats.explorer.testedge2.haqq.network/health, job=https://stats.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://stats.explorer.testedge2.haqq.network/health} value=3 ], [ var='C' labels={instance=https://stats.explorer.testedge2.haqq.network/health, job=https://stats.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://stats.explorer.testedge2.haqq.network/health} value=0 ]} {Instance:instance=https://testedge2.haqq.network/api/v1/health, 
job=https://testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://testedge2.haqq.network/api/v1/health, job=https://testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://testedge2.haqq.network Value:0xc0130bedc0} B:{Var:B Labels:instance=https://testedge2.haqq.network/api/v1/health, job=https://testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://testedge2.haqq.network Value:0xc0130bed38} C:{Var:C Labels:instance=https://testedge2.haqq.network/api/v1/health, job=https://testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://testedge2.haqq.network Value:0xc0130bed80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183749691s EvaluationString:[ var='A' labels={instance=https://testedge2.haqq.network/api/v1/health, job=https://testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://testedge2.haqq.network/api/v1/health, job=https://testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://testedge2.haqq.network/api/v1/health, job=https://testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://testedge2.haqq.network} value=0 ]} {Instance:instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health Value:0xc0130bee88} B:{Var:B Labels:instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health Value:0xc0130beed0} C:{Var:C Labels:instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health Value:0xc0130bee40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183757348s EvaluationString:[ var='A' labels={instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health} value=3 ], [ var='B' labels={instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health} value=3 ], [ var='C' labels={instance=https://verifier.explorer.testedge2.haqq.network/health, job=https://verifier.explorer.testedge2.haqq.network/health@testedge2, label_check=testedge2, label_site=https://verifier.explorer.testedge2.haqq.network/health} value=0 ]} {Instance:instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, 
label_site=https://websocket.wallet.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://websocket.wallet.testedge2.haqq.network Value:0xc0130bef60} B:{Var:B Labels:instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://websocket.wallet.testedge2.haqq.network Value:0xc0130befa0} C:{Var:C Labels:instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://websocket.wallet.testedge2.haqq.network Value:0xc0130befe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183768623s EvaluationString:[ var='A' labels={instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://websocket.wallet.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://websocket.wallet.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://websocket.wallet.testedge2.haqq.network, job=https://websocket.wallet.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://websocket.wallet.testedge2.haqq.network} value=0 ]} {Instance:instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network Value:0xc0130bf070} B:{Var:B Labels:instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network Value:0xc0130bf0b0} C:{Var:C Labels:instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network Value:0xc0130bf0f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183778237s EvaluationString:[ var='A' labels={instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network} value=3 ], [ var='B' labels={instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network} value=3 ], [ var='C' labels={instance=https://yaqoot.services.testedge2.haqq.network, job=https://yaqoot.services.testedge2.haqq.network@testedge2, label_check=testedge2, label_site=https://yaqoot.services.testedge2.haqq.network} value=0 ]}]" duration=100.488519ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269073411Z level=debug 
msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t97wy5pu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270534102Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269042608Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269036344Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269006612Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.269000494Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268982961Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268960385Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268951375Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268937742Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268930935Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268922988Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t97foaw2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270304739Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268893717Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:14.270263375Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26888258Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268846445Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26883139Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t94uk84n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270202608Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t94uk84n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.270171188Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268810453Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26879432Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268787544Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268768553Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26876252Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.269892208Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=26.534826ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t903ru7p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.269898965Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, 
mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.269870125Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=309009 slug=elestyle instance="node=ip-10-10-43-85.ap-northeast-1.compute.internal" t=2024-05-29T13:44:14.269897839Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.269850418Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.269720339Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=884866 slug=cnonumerique version=34 fingerprint=e13c61ffa70a151d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.269639514Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdhk917z41xj4a, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.269340969s EvaluationString:}]" duration=9.817512ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268736916Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268730718Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.269597078Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268716961Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager.persist user=306439 slug=caiprod t=2024-05-29T13:44:14.269526179Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268666805Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26863407Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:14.26941884Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" +logger=ngalert.scheduler user=306439 slug=caiprod version=3 fingerprint=19b002d9ab773d78 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.269428997Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=DggGZ2cVz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.269120385s EvaluationString:}]" duration=23.387052ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268612345Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268591845Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268577323Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=309009 slug=elestyle t=2024-05-29T13:44:14.269365735Z level=debug msg="State manager processing evaluation results" resultCount=5 +logger=ngalert.scheduler user=309009 slug=elestyle version=1 fingerprint=024583b43d44e173 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.269224333Z level=debug msg="Alert rule evaluated" results="[{Instance:node=ip-10-10-42-141.ap-northeast-1.compute.internal State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:node=ip-10-10-42-141.ap-northeast-1.compute.internal Value:0xc00387b2e0} C:{Var:C Labels:node=ip-10-10-42-141.ap-northeast-1.compute.internal Value:0xc00387b2d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.268845582s EvaluationString:[ var='B' labels={node=ip-10-10-42-141.ap-northeast-1.compute.internal} value=0.1206896551724138 ], [ var='C' labels={node=ip-10-10-42-141.ap-northeast-1.compute.internal} value=0 ]} {Instance:node=ip-10-10-42-166.ap-northeast-1.compute.internal State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:node=ip-10-10-42-166.ap-northeast-1.compute.internal Value:0xc00387b300} C:{Var:C Labels:node=ip-10-10-42-166.ap-northeast-1.compute.internal Value:0xc00387b310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.268858777s EvaluationString:[ var='B' labels={node=ip-10-10-42-166.ap-northeast-1.compute.internal} value=0.017241379310344827 ], [ var='C' labels={node=ip-10-10-42-166.ap-northeast-1.compute.internal} value=0 ]} {Instance:node=ip-10-10-43-85.ap-northeast-1.compute.internal State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:node=ip-10-10-43-85.ap-northeast-1.compute.internal Value:0xc00387b340} C:{Var:C Labels:node=ip-10-10-43-85.ap-northeast-1.compute.internal Value:0xc00387b330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.268864932s EvaluationString:[ var='B' labels={node=ip-10-10-43-85.ap-northeast-1.compute.internal} value=0.13793103448275862 ], [ var='C' labels={node=ip-10-10-43-85.ap-northeast-1.compute.internal} value=0 ]} {Instance:node=ip-10-10-44-18.ap-northeast-1.compute.internal State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:node=ip-10-10-44-18.ap-northeast-1.compute.internal Value:0xc00387b370} C:{Var:C Labels:node=ip-10-10-44-18.ap-northeast-1.compute.internal Value:0xc00387b360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.268870284s EvaluationString:[ var='B' labels={node=ip-10-10-44-18.ap-northeast-1.compute.internal} value=0.06896551724137931 ], [ var='C' labels={node=ip-10-10-44-18.ap-northeast-1.compute.internal} value=0 ]} {Instance:node=ip-10-10-48-200.ap-northeast-1.compute.internal State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:node=ip-10-10-48-200.ap-northeast-1.compute.internal Value:0xc00387b390} C:{Var:C Labels:node=ip-10-10-48-200.ap-northeast-1.compute.internal Value:0xc00387b3a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.268875202s EvaluationString:[ var='B' labels={node=ip-10-10-48-200.ap-northeast-1.compute.internal} value=0.05172413793103448 ], [ var='C' labels={node=ip-10-10-48-200.ap-northeast-1.compute.internal} value=0 ]}]" duration=166.451971ms 
+logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268529734Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268511287Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t8je378k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.269340989Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t8iqy8k5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.269310659Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t8iqy8k5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.269288119Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268400754Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268356093Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t8h4id4o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.269089677Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268269234Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.269081019Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.268234461Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +2024/05/29 13:44:14 ERROR: [transport] Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII "too_many_pings". +logger=ngalert.state.manager user=332534 slug=adevintakijiji instance="resource.label.database_id=ca-kijiji-production-up0f:ngdb-box-gcp-readreplica, resource.label.project_id=ca-kijiji-production-up0f, resource.label.region=us-east4, resource.type=cloudsql_database" t=2024-05-29T13:44:14.26904956Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268205929Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t803lyi9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268994216Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26814132Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.26896655Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268116724Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t803lyi9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268943965Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t803lyi9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268923085Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t803lyi9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268866395Z level=debug msg="Setting next state" handler=resultNormal 
+logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:14.268799141Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=--" t=2024-05-29T13:44:14.26876624Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7vnao4v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268836194Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.268013108Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7vnao4v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268758203Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=--" t=2024-05-29T13:44:14.268722328Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=--" t=2024-05-29T13:44:14.268688895Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267984141Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267979068Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7vnao4v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268727103Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-t7vnao4v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268704553Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267959789Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.268703043Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267953139Z level=debug msg="Keeping state" state=Normal +2024/05/29 13:44:14 ERROR: [transport] Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII "too_many_pings". +logger=ngalert.scheduler user=316418 slug=workmotion version=3 fingerprint=6917dcf20e8dd1af attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.268344877Z level=debug msg="Alert rule evaluated" results="[{Instance:ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=-- State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=-- Value:} C:{Var:C Labels:ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=-- Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.22076952s EvaluationString:[ var='B' labels={ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=--} value=null ], [ var='C' labels={ApiName=prod-workmotion-tenant-api, Method=--, Resource=/expense-schema-datasources/{proxy+}, Stage=--} value=null ]}]" duration=99.363591ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26791247Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:14.26835841Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=99.057337ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267907673Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:14.268587123Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267890152Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7m0j9dy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.268517661Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267871109Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7m0j9dy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268502261Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.268473127Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26783342Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267813235Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267796252Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.268409276Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7m0j9dy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26839631Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267766047Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7kef8do-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268293819Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267751657Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.268303999Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-user-stg" 
t=2024-05-29T13:44:14.268290399Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267713699Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267702554Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7kef8do-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268231228Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26766955Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.268261801Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267624528Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t7kef8do-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268162927Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-payment-method-stg" t=2024-05-29T13:44:14.268154579Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev222.gradle.org, instance=dev222.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.268119171Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev222.gradle.org, instance=dev222.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.268105473Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t79lrovm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.268058516Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267543521Z level=debug msg="Execution no data state is Normal" 
handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-63-200.us-west-2.compute.internal" t=2024-05-29T13:44:14.268086202Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-email-stg" t=2024-05-29T13:44:14.268052874Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-60-73.us-west-2.compute.internal" t=2024-05-29T13:44:14.26801307Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.267992256Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t79lrovm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.267970905Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267473754Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-card-stg" t=2024-05-29T13:44:14.267934436Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267459296Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev221.gradle.org, instance=dev221.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.267953306Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-56-87.us-west-2.compute.internal" t=2024-05-29T13:44:14.267937139Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267408011Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267367059Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267359639Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267345243Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26729178Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t79f2kxq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.267769633Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267284115Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267272816Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26726299Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267255118Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267247181Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267236259Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267230371Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-52-39.us-west-2.compute.internal" t=2024-05-29T13:44:14.267760095Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267223252Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267213724Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267202208Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267197543Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267189409Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267168866Z level=debug 
msg="Keeping state" state=Normal +logger=ngalert.state.manager user=357638 slug=usepower instance="DBInstanceIdentifier=service-brand-stg" t=2024-05-29T13:44:14.267713962Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t79f2kxq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.267648882Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.267101595Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t78lq7o2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.267611722Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.267598061Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266947104Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266927634Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="__name__=aws_sqs_approximate_number_of_messages_visible_average, env=production, exported_job=aws_sqs, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper, queue_name=training-gamification-dead-letter-queue" t=2024-05-29T13:44:14.267513885Z level=warn msg="Failed to take an image" dashboard=afe346f5-4182-4db4-910b-cc09ba4c9edc panel=2 error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266864646Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266854039Z level=debug msg="Keeping state" state=Normal +level=info ts=2024-05-29T13:44:14.267481866Z caller=remote_image_capturer.go:61 user=196013 slug=inmediasoftware rule_org_id=1 rule_uid=a66a16ed-9dee-4d9a-b8ec-42026383f9f8 dashboard=afe346f5-4182-4db4-910b-cc09ba4c9edc panel=2 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266837278Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=166705 
slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-36-174.us-west-2.compute.internal" t=2024-05-29T13:44:14.267511422Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266782771Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266753691Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266725245Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev219.gradle.org, instance=dev219.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.267344204Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev219.gradle.org, instance=dev219.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.267331928Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266638169Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-24-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.267315424Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266628506Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26660326Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-24-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.267303851Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266554366Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-22-194.us-west-2.compute.internal" t=2024-05-29T13:44:14.267253468Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=713300 slug=tpcnanonprod t=2024-05-29T13:44:14.267110339Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev219.gradle.org, instance=dev219.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.267205901Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=713300 slug=tpcnanonprod 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.267086649Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26650312Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=713300 slug=tpcnanonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.267076708Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266492406Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t70mfhf0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.267136427Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266462788Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266451761Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev218.gradle.org, instance=dev218.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.267075277Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266340791Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266332934Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6ztszl4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.267015906Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=sandbox, node=ip-10-40-16-252.us-west-2.compute.internal" t=2024-05-29T13:44:14.266943455Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6x6da5q-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266874554Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev218.gradle.org, instance=dev218.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.266920354Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.266892411Z caller=remote_instance_store.go:51 user=705083 slug=mediakindsaas msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266113648Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=production, node=ip-10-20-58-131.us-west-2.compute.internal" t=2024-05-29T13:44:14.266874538Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=235691 slug=om2 instance="datasource_uid=XnTUTcn7z, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.266807485Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26609328Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=235691 slug=om2 instance="datasource_uid=XnTUTcn7z, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.266799714Z level=debug msg="Execution keep last state is Normal" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266048366Z level=debug msg="Keeping state" state=Normal +logger=ngalert.scheduler user=705083 slug=mediakindsaas version=5 fingerprint=846125b2be52818c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.266662396Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=b84ff395-2362-4f0f-81bc-23c7d9c9005b, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.262951441s EvaluationString:}]" duration=84.719576ms +logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:14.266600292Z level=debug msg="Deleting alert states" count=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266017588Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266010176Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="__name__=aws_sqs_approximate_number_of_messages_visible_average, env=production, exported_job=aws_sqs, hostname=ip-10-0-18-158.ec2.internal, instance=localhost:9106, job=training-metrics-scrapper, queue_name=training-gamification-dead-letter-queue" t=2024-05-29T13:44:14.26666665Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=516446 slug=awarehqdev instance="EndpointName=v3-password, Series=queryc7290121be964414a8ebfa05f0c94492" t=2024-05-29T13:44:14.26655319Z level=debug 
msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.266675897Z caller=remote_instance_store.go:51 user=516446 slug=awarehqdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.266004664Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=196013 slug=inmediasoftware t=2024-05-29T13:44:14.266609885Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265990963Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev217.gradle.org, instance=dev217.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.266667667Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265981764Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265969727Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265924773Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265918282Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26590582Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265894248Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev216.gradle.org, instance=dev216.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.26651568Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.266430289Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26584972Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265842093Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.2658201Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData 
+level=debug ts=2024-05-29T13:44:14.26634191Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6vlcb7v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266377869Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26579401Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6vlcb7v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266355689Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265776321Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265756686Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-61-218.us-west-2.compute.internal" t=2024-05-29T13:44:14.266323236Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-57-248.us-west-2.compute.internal" t=2024-05-29T13:44:14.266238886Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6d9uov7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266218707Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-57-248.us-west-2.compute.internal" t=2024-05-29T13:44:14.266210136Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265675192Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265656256Z level=debug msg="Setting next state" handler=resultNoData +level=debug ts=2024-05-29T13:44:14.266144426Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" 
+logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265610401Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev215.gradle.org, instance=dev215.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.266092537Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6d9uov7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266152397Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-56-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.266120057Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev215.gradle.org, instance=dev215.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.266077551Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6d9uov7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266123446Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t6d9uov7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266113256Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26554589Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265533455Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265523258Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.266079001Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265512377Z level=debug msg="Execution no 
data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265504358Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265491174Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=318831 slug=grafanavianet t=2024-05-29T13:44:14.266034788Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.788998ms +level=debug ts=2024-05-29T13:44:14.266017586Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-56-190.us-west-2.compute.internal" t=2024-05-29T13:44:14.266008761Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265452848Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t632qnrw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.266011295Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265415802Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t632qnrw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265981625Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265392848Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26537063Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265360371Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265338724Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, 
node=ip-10-0-55-47.us-west-2.compute.internal" t=2024-05-29T13:44:14.265893152Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t62wf0y1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265853444Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265262198Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265247323Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-54-24.us-west-2.compute.internal" t=2024-05-29T13:44:14.265827348Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265239621Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t62wf0y1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265814603Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265218936Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265211403Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265188841Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-52-198.us-west-2.compute.internal" t=2024-05-29T13:44:14.26573943Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265161128Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-52-198.us-west-2.compute.internal" t=2024-05-29T13:44:14.265726628Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.265112397Z level=debug msg="Setting next state" handler=resultNoData +level=debug 
ts=2024-05-29T13:44:14.265603209Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264989633Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t60gyvfm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265578031Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-50-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.265547081Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t60gyvfm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26554097Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=691102 slug=deluxeconfdev t=2024-05-29T13:44:14.26541299Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26485451Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264823145Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.265377009Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264812843Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=691102 slug=deluxeconfdev t=2024-05-29T13:44:14.265362769Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5ph0bx0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265411199Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-47-148.us-west-2.compute.internal" t=2024-05-29T13:44:14.265455397Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 
slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264772318Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=debug ts=2024-05-29T13:44:14.265364285Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264759128Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.265360806Z caller=remote_instance_store.go:51 user=146728 slug=dgc msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26474537Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26473815Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev212.gradle.org, instance=dev212.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.265351587Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.265280292Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264704863Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264697275Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26468797Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev211.gradle.org, instance=dev211.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.265237277Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.2646774Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5ph0bx0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265197877Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev211.gradle.org, instance=dev211.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.265225378Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.264635676Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5ph0bx0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265138336Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5pahl31-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265095646Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5pahl31-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265069806Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5pahl31-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265028295Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264608198Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264603507Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +level=info ts=2024-05-29T13:44:14.265156795Z caller=remote_alert_sender.go:94 user=233137 slug=mirrornode host=mirrornode-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.247.70:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddk8b6b3eubk0a alerts=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5pahl31-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.265006195Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=22398 slug=sunfolding t=2024-05-29T13:44:14.265126113Z level=debug msg="State manager processing evaluation results" 
resultCount=1 +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264598314Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager.persist user=233137 slug=mirrornode t=2024-05-29T13:44:14.265051194Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=93.611116ms +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26455276Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5p0hvip-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264936054Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264547898Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5p0hvip-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264911614Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264522023Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264504191Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264498144Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264493508Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData +logger=ngalert.state.manager.persist user=421567 slug=nexx360 t=2024-05-29T13:44:14.26483892Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=421567 slug=nexx360 instance= t=2024-05-29T13:44:14.264790504Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev210.gradle.org, instance=dev210.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.264946336Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=421567 slug=nexx360 instance= t=2024-05-29T13:44:14.26478011Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5p0hvip-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264880424Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-39-32.us-west-2.compute.internal" t=2024-05-29T13:44:14.264901047Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5p0hvip-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264826373Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264465608Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.264825369Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5p0hvip-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264805563Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev21.gradle.org, instance=dev21.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.264866379Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26442575Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5ozu080-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264724962Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5ozu080-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264715732Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5ozu080-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264686402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264390494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5ozu080-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264663271Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264380384Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264275973Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264254619Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.26465334Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264231038Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5n9qrei-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264635521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-35-89.us-west-2.compute.internal" t=2024-05-29T13:44:14.264635246Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264169637Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264146567Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264118668Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager 
user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264084348Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5n9qrei-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26453427Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264074743Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264042461Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.264034179Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.264492665Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263979369Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263971262Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.264412005Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263950534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263922343Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263912953Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263901778Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.264210492Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.44641ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26389119Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-t5j0le27-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264333568Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263859703Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev207.gradle.org, instance=dev207.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.264311086Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.264185677Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263790164Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263783011Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-31-141.us-west-2.compute.internal" t=2024-05-29T13:44:14.264289648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263773266Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263751394Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.264146665Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263727547Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5h5f2ai-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264177116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev207.gradle.org, instance=dev207.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.264168824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5h5f2ai-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264125426Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263656647Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263648391Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263620865Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.264112085Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.264055906Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263575129Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5h5f2ai-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.264096066Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.264031696Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.624699ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26354008Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263524848Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263519142Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-29-173.us-west-2.compute.internal" t=2024-05-29T13:44:14.264048986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=615392 slug=shinemetrics t=2024-05-29T13:44:14.263989637Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5eyu3e4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263983885Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263486974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="__name__=probe_success, config_version=1715008305716142080, instance=https://api.shine.fr/v2/treezor/users/liveness_check, job=Liveness Check treezor-users-v2, probe=Paris" t=2024-05-29T13:44:14.26394591Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263458741Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26345243Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-27-217.us-west-2.compute.internal" t=2024-05-29T13:44:14.263962463Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev206.gradle.org, instance=dev206.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.263922421Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-25-85.us-west-2.compute.internal" t=2024-05-29T13:44:14.263880368Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263413991Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5b1vr72-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263850933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263389667Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263382475Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263368255Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263360404Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev205.gradle.org, instance=dev205.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" 
t=2024-05-29T13:44:14.263825819Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263326241Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-25-205.us-west-2.compute.internal" t=2024-05-29T13:44:14.263772124Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263317545Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5acncy9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263739382Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263276075Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=office, node=ip-10-0-24-233.us-west-2.compute.internal" t=2024-05-29T13:44:14.263679548Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev205.gradle.org, instance=dev205.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.263729457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263252125Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263245865Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263238515Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev205.gradle.org, instance=dev205.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.263721078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263190178Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5acncy9-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263684411Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263183667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.263598656Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t5acncy9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263617711Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263163231Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263105105Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev204.gradle.org, instance=dev204.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.26351879Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t58l2j4k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26349722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t58l2j4k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263481469Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263041669Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.263381948Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-t58hqazc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263364048Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263011105Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev203.gradle.org, instance=dev203.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.263383859Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.263005961Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.263356079Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262997971Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262956247Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev203.gradle.org, instance=dev203.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.263310921Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.263300597Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev203.gradle.org, instance=dev203.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.263301303Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.263272287Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262935019Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262929907Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262916243Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262911402Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-t56lodtg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263242237Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262906439Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262894547Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.263109787Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.26312296Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=janeway, node=ip-10-50-52-105.us-west-2.compute.internal" t=2024-05-29T13:44:14.263076788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t52d6uc7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263084565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t52d6uc7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.263056515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262824915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262799671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262783585Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=janeway, node=ip-10-50-41-1.us-west-2.compute.internal" t=2024-05-29T13:44:14.262971575Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262772058Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev201.gradle.org, instance=dev201.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.262988496Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t51y6zt7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262964684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262740759Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26272308Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.262900007Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.262555426Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262673381Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t510rd6r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262859033Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev201.gradle.org, instance=dev201.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.262900504Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262640572Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.262553816Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t510rd6r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262829523Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262610145Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262597163Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev200.gradle.org, instance=dev200.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.262821175Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t510rd6r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262724192Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t510rd6r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262703401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262555097Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=janeway, node=ip-10-50-28-84.us-west-2.compute.internal" t=2024-05-29T13:44:14.262500813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262545084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=tmpfs, env=janeway, node=ip-10-50-16-64.us-west-2.compute.internal" t=2024-05-29T13:44:14.262333834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262524177Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-63-200.us-west-2.compute.internal" t=2024-05-29T13:44:14.262236683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-60-73.us-west-2.compute.internal" t=2024-05-29T13:44:14.262163821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26247876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4fcichb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262623481Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 
slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262435215Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4fcichb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26256036Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.26251345Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262355014Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4fcichb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26253124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262290128Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.262515393Z caller=remote_instance_store.go:51 user=477402 slug=infleqtion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262279607Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=477402 slug=infleqtion t=2024-05-29T13:44:14.262468651Z level=debug msg="Saving alert states" count=9 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4esr50i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262483229Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262237425Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4esr50i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262473019Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262451218Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev175.gradle.org, instance=dev175.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.262437952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev175.gradle.org, instance=dev175.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.262422896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262442083Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26243537Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26243015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4esr50i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262389838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262127543Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262094804Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262356047Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev175.gradle.org, instance=dev175.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.262309941Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262074159Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262066308Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262309547Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=477402 slug=infleqtion 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26225573Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26224665Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4df2mt1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262289177Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=477402 slug=infleqtion instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262233553Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.262021031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t46fyhcq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262210876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261999744Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev174.gradle.org, instance=dev174.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.262181599Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261967106Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.262098285Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t46fyhcq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262149686Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev174.gradle.org, instance=dev174.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.262169172Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.262145863Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261894954Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261879683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:14.262118645Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261864587Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-56-87.us-west-2.compute.internal" t=2024-05-29T13:44:14.262086281Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t46b1jmy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262066725Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261783452Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t46b1jmy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.262004754Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261776147Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev174.gradle.org, instance=dev174.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.262033745Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t46b1jmy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261975834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261741007Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug 
ts=2024-05-29T13:44:14.261924847Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261731072Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev173.gradle.org, instance=dev173.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.261938392Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-52-39.us-west-2.compute.internal" t=2024-05-29T13:44:14.261924749Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261659169Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4202kxy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261900363Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261539767Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=45099 slug=iyuyue t=2024-05-29T13:44:14.261566577Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261530225Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261524212Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261517157Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261480077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4202kxy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261805782Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261458979Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t4202kxy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261783962Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-40-68.us-west-2.compute.internal" t=2024-05-29T13:44:14.261769229Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3xr9gkc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261745982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261440063Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev172.gradle.org, instance=dev172.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.261706758Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261421459Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261413774Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev172.gradle.org, instance=dev172.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.261691746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3xr9gkc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26164056Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261394327Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261381532Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261361997Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager 
user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261330217Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3wr2lyq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26154891Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-34-18.us-west-2.compute.internal" t=2024-05-29T13:44:14.261574198Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261291214Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.26155247Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3wr2lyq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261515729Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3wr2lyq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261493279Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26126314Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261258321Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-24-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.261479795Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.261447819Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.261443131Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev171.gradle.org, instance=dev171.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.261431434Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261175584Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3qs8cd5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261371488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3qs8cd5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261344677Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3cg6uhq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261306827Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.2611395Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.261316025Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="region=eu-west-1, service=kube-state-metrics, stage=development" + level=debug ts=2024-05-29T13:44:14.26130415Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261117929Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=698963 slug=lemonade version=5 fingerprint=5babcad2a205caf5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.261194732Z level=debug msg="Alert rule evaluated" results="[{Instance:app=monolith-partners-api, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=monolith-partners-api, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development State:Alerting Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=monolith-partners-api, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=monolith-partners-api, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc00576e790} THRESHOLD:{Var:THRESHOLD Labels:app=monolith-partners-api, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=monolith-partners-api, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc00576e6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.260814014s EvaluationString:[ var='QUERY' labels={app=monolith-partners-api, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=monolith-partners-api, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=1 ], [ var='THRESHOLD' labels={app=monolith-partners-api, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=monolith-partners-api, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=1 ]}]" duration=135.871056ms + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261101129Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=sandbox, node=ip-10-40-20-137.us-west-2.compute.internal" t=2024-05-29T13:44:14.261210505Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t3cg6uhq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.261208756Z level=debug msg="Setting next state" handler=resultNormal + 
level=debug ts=2024-05-29T13:44:14.261184943Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261079989Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261060986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.261049216Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:14.261122192Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=info ts=2024-05-29T13:44:14.261023588Z caller=grafana.go:247 user=127813 slug=clearsale msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=b4ceb5fc-e07d-4fd0-98e0-503bf2474435" groups=1 alerts=0 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26103828Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev170.gradle.org, instance=dev170.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.261110997Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev170.gradle.org, instance=dev170.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.261094212Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:14.261025062Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=389502 slug=ciscoiot instance="psa_cluster=phx" t=2024-05-29T13:44:14.261070816Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t344lyp7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260989164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260919036Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260908739Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260900637Z level=debug msg="Setting 
next state" handler=resultNoData + logger=ngalert.state.manager user=389502 slug=ciscoiot instance="psa_cluster=lyn" t=2024-05-29T13:44:14.261017545Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=389502 slug=ciscoiot instance= t=2024-05-29T13:44:14.260968713Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=389502 slug=ciscoiot t=2024-05-29T13:44:14.26093263Z level=debug msg="State manager processing evaluation results" resultCount=4 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t31b61ds-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260855192Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260832474Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t31b61ds-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260823032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=production, node=ip-10-20-52-228.us-west-2.compute.internal" t=2024-05-29T13:44:14.260839463Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260777135Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260765737Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260750906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260586966Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.260695067Z caller=grafana.go:247 user=127813 slug=clearsale msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=b4ceb5fc-e07d-4fd0-98e0-503bf2474435" groups=0 alerts=0 + logger=ngalert.state.manager user=45099 slug=iyuyue t=2024-05-29T13:44:14.260630961Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260580834Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 
slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260556389Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26053361Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t30dtsib-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.26062399Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.260688259Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t30dtsib-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2605878Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26051768Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:14.260672059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260506598Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260464595Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260457792Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev168.gradle.org, instance=dev168.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.260621049Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260450886Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260437931Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260433192Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260415741Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260410141Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t30dtsib-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260506139Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260393744Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t30dtsib-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260463278Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=production, node=ip-10-20-25-103.us-west-2.compute.internal" t=2024-05-29T13:44:14.260511125Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260375766Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260364005Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.26033368Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260315032Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2w2vr1o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260419368Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2w2vr1o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260359497Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260256026Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260216601Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.260340322Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev167.gradle.org, instance=dev167.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.260349173Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260122101Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2w2vr1o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260317137Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260097737Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260066266Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2u7nfqz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260266246Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.260268218Z caller=remote_instance_store.go:51 user=846513 slug=npc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.260004493Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259992607Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259973123Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev167.gradle.org, instance=dev167.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.26021534Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.260175215Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.2601389Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259847613Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259836856Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.260053605Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259802429Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev165.gradle.org, instance=dev165.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.260087433Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259787681Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259760807Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259752332Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259729657Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.260053532Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.260030706Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.953788ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2tcpuxo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.260033124Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259658436Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2phmfpv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259972913Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259598714Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259577641Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-56-190.us-west-2.compute.internal" t=2024-05-29T13:44:14.260023321Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259566591Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259555279Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259544077Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259537014Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev165.gradle.org, instance=dev165.gradle.org, job=node, mountpoint=/home/tcagent1/agent/temp, os=Ubuntu" t=2024-05-29T13:44:14.260000215Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259445929Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev148.gradle.org, instance=dev148.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.259914794Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-54-24.us-west-2.compute.internal" t=2024-05-29T13:44:14.259860559Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259382758Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2pgu9xy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259782181Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259352548Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev147.gradle.org, instance=dev147.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.259801472Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259291313Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-52-198.us-west-2.compute.internal" t=2024-05-29T13:44:14.259776126Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259246015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2pgu9xy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259743061Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2lstc8y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259713091Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-52-198.us-west-2.compute.internal" t=2024-05-29T13:44:14.259734971Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259225666Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-52-184.us-west-2.compute.internal" t=2024-05-29T13:44:14.259660242Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2lstc8y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25963903Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259193582Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2lstc8y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25960059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2jeq1hw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259573029Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.259594288Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259183363Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259167196Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:14.259513093Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.10469ms
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259155838Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259145341Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev144.gradle.org, instance=dev144.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.259508798Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-50-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.259556202Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-47-148.us-west-2.compute.internal" t=2024-05-29T13:44:14.259461486Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:14.259429121Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259063627Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259056753Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2esxocd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259429728Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2esxocd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259405788Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259027002Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=111653 slug=theassociationmxp t=2024-05-29T13:44:14.259385136Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.259022261Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2esxocd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259375017Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258995517Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev143.gradle.org, instance=dev143.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.259317241Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t2esxocd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259325647Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258957633Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t29ja46r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259265236Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258950893Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258940099Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258916853Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t29ja46r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259195435Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258891292Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258885015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t279dpvl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259070174Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258875639Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-39-32.us-west-2.compute.internal" t=2024-05-29T13:44:14.259075736Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258857055Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258830548Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t279dpvl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.259042384Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-37-86.us-west-2.compute.internal" t=2024-05-29T13:44:14.258988042Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258794037Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1q0iwjy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258927043Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.258931766Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258760695Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258755853Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1q0iwjy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258874822Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258748087Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1ojqxar-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258818082Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258715036Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-35-89.us-west-2.compute.internal" t=2024-05-29T13:44:14.258833199Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258702301Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25868293Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258676463Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258647961Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25864093Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1ojqxar-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258747471Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1ojqxar-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258726781Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev124.gradle.org, instance=dev124.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.258743628Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258614593Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1n6vd49-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25865951Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258605596Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258570521Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-33-218.us-west-2.compute.internal" t=2024-05-29T13:44:14.258647617Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1n6vd49-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258606049Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258510215Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1n6vd49-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258552309Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258477963Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-31-141.us-west-2.compute.internal" t=2024-05-29T13:44:14.258552951Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258439179Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258412914Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258400383Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258381613Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25837225Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25836465Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-29-85.us-west-2.compute.internal" t=2024-05-29T13:44:14.25845006Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1epdti3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258411627Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-fd50be4c22a64ad1, persistentvolumeclaim=main-main-7qd5-pgdata" t=2024-05-29T13:44:14.258264695Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-d3abda48447b4e7b, persistentvolumeclaim=data-rabbitmq-2" t=2024-05-29T13:44:14.25823848Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1cywk5e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258295376Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-afef2893ae524e8e, persistentvolumeclaim=data-rabbitmq-1" t=2024-05-29T13:44:14.258153338Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev120.gradle.org, instance=dev120.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.258280898Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-afef2893ae524e8e, persistentvolumeclaim=data-rabbitmq-1" t=2024-05-29T13:44:14.258141748Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1cywk5e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258255376Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-9a5ff03e38f6470d, persistentvolumeclaim=data-prometheus-0" t=2024-05-29T13:44:14.258115559Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-9a5ff03e38f6470d, persistentvolumeclaim=data-prometheus-0" t=2024-05-29T13:44:14.258036734Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258098943Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-25-85.us-west-2.compute.internal" t=2024-05-29T13:44:14.258163796Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.258056648Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25793692Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t1butf13-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258105474Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0x1ab7w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258077924Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-5957f80053bd497b, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:14.257982758Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance= t=2024-05-29T13:44:14.25810613Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-4e598af87a124eaf, persistentvolumeclaim=data-redpanda-0" t=2024-05-29T13:44:14.257935563Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25792549Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd t=2024-05-29T13:44:14.25804888Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257909523Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0x1ab7w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.258028103Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25788472Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0x1ab7w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257968053Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257861953Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0wufh1f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257917762Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-0985df04560a42ad, persistentvolumeclaim=data-rabbitmq-0" t=2024-05-29T13:44:14.257848392Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-0985df04560a42ad, persistentvolumeclaim=data-rabbitmq-0" t=2024-05-29T13:44:14.257838512Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:14.257790054Z level=debug msg="State manager processing evaluation results" resultCount=11
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev118.gradle.org, instance=dev118.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.257864695Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0wufh1f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257834891Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257726571Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0wufh1f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257813361Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0tkinwj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257782211Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257701936Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25767997Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257668887Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.257666985Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257640906Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.257679431Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-17-88.us-west-2.compute.internal" t=2024-05-29T13:44:14.257652217Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.257616339Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0tkinwj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25765784Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=office, node=ip-10-0-17-88.us-west-2.compute.internal" t=2024-05-29T13:44:14.257609404Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0rp3d13-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257627109Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25760855Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev117.gradle.org, instance=dev117.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.25768892Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257578187Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0rp3d13-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257604499Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25751496Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257506347Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0rp3d13-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257522028Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257418667Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=janeway, node=ip-10-50-56-180.us-west-2.compute.internal" t=2024-05-29T13:44:14.257513768Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257329448Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.257434238Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.789277ms
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257285908Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev105.gradle.org, instance=dev105.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.257453001Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.257409638Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257248166Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257236513Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257206523Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev104.gradle.org, instance=dev104.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.257330286Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257161015Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.257264Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257096447Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=777670 slug=fakku t=2024-05-29T13:44:14.257200868Z level=debug msg="Saving alert states done" count=41 max_state_save_concurrency=1 duration=401.082807ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0owvrdm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257188725Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257076307Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257045987Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev103.gradle.org, instance=dev103.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.257193631Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257026627Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=tmpfs, fstype=tmpfs, host=dev103.gradle.org, instance=dev103.gradle.org, job=node, mountpoint=/tmp, os=Ubuntu" t=2024-05-29T13:44:14.25718158Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.257004162Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25699642Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0owvrdm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257146724Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256974577Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25695558Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.257031445Z caller=remote_instance_store.go:51 user=401509 slug=redefined msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256933272Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0ihnfc3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.257031893Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256884155Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256872275Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=janeway, node=ip-10-50-35-27.us-west-2.compute.internal" t=2024-05-29T13:44:14.257026076Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256860843Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256839717Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=eu-build-cache.gradle.org, instance=eu-build-cache.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256955491Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256773013Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0ezua50-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256842461Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=janeway, node=ip-10-50-34-72.us-west-2.compute.internal" t=2024-05-29T13:44:14.25694019Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0ezua50-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256814941Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256704852Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:14.256730091Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.893033ms
+ level=debug ts=2024-05-29T13:44:14.256803703Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.256754584Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0ezua50-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25670342Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256575527Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0cub0dc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256621809Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256530979Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256523312Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256512806Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=shm, env=janeway, node=ip-10-50-28-226.us-west-2.compute.internal" t=2024-05-29T13:44:14.256650892Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0cub0dc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256549338Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.256639569Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256469036Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256438872Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev94.gradle.org, instance=dev94.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256608485Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25639666Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0cooeid-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256392926Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev93.gradle.org, instance=dev93.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256539014Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0cooeid-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256321916Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0akouv6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256290295Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev93.gradle.org, instance=dev93.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256528467Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0akouv6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256259995Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t0akouv6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.256213585Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256343617Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=698963 slug=lemonade version=1 fingerprint=7a476a8274454b6a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.256392341Z level=debug msg="Alert rule evaluated" results="[{Instance:app=notifications-event-worker, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=notifications-event-worker, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=notifications-event-worker, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=notifications-event-worker, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production Value:0xc030f32b60} THRESHOLD:{Var:THRESHOLD Labels:app=notifications-event-worker, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=notifications-event-worker, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production Value:0xc030f32ee0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.255966956s EvaluationString:[ var='QUERY' labels={app=notifications-event-worker, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=notifications-event-worker, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production} value=0 ], [ var='THRESHOLD' labels={app=notifications-event-worker, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=notifications-event-worker, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production} value=0 ]}]" duration=52.83182ms
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256280319Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256269054Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256257132Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.256331303Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev92.gradle.org, instance=dev92.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256429986Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256249607Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.256310545Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.256241088Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev92.gradle.org, instance=dev92.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256415582Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=sandbox, node=ip-10-40-63-200.us-west-2.compute.internal" t=2024-05-29T13:44:14.256445967Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256218631Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256179757Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t07hd53e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255989112Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=sandbox, node=ip-10-40-60-73.us-west-2.compute.internal" t=2024-05-29T13:44:14.256318242Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256172524Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-t07hd53e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255964732Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25615226Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev91.gradle.org, instance=dev91.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256265673Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=sandbox, node=ip-10-40-56-87.us-west-2.compute.internal" t=2024-05-29T13:44:14.256221097Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256083467Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.256049519Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255997714Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev89.gradle.org, instance=dev89.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.256113181Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255979058Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255840863Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255832725Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255822662Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=sandbox, node=ip-10-40-40-68.us-west-2.compute.internal" t=2024-05-29T13:44:14.255903101Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sztvbys4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25576687Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255736113Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255713897Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sztvbys4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25574455Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.255783185Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255680562Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.255706788Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szrsx7ne-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255709679Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szrsx7ne-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255682839Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255594271Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255563667Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255545133Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255524389Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=sandbox, node=ip-10-40-24-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.255642774Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255516636Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255508713Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.25561775Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szrsx7ne-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255582858Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25544838Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255439227Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szrsx7ne-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255560438Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255398348Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szqsctoa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255527097Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25539119Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255383912Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255364137Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255355914Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szqsctoa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255440327Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.2553445Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev79.gradle.org, instance=dev79.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.255473217Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255333258Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25526136Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szqsctoa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255391136Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255242796Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=sandbox, node=ip-10-40-20-137.us-west-2.compute.internal" t=2024-05-29T13:44:14.255372846Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255229261Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szlmp270-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255304385Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25518643Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255127353Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.255269834Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25509617Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev77.gradle.org, instance=dev77.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.255245274Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szlmp270-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255243524Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szlmp270-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255220814Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szlmp270-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255191314Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255052505Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=production, node=ip-10-20-58-131.us-west-2.compute.internal" t=2024-05-29T13:44:14.255211832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25504135Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255029427Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.255021791Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szi0l267-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.255100733Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=production, node=ip-10-20-52-228.us-west-2.compute.internal" t=2024-05-29T13:44:14.25511196Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254935654Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=production, node=ip-10-20-52-228.us-west-2.compute.internal" t=2024-05-29T13:44:14.255099595Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254917546Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.255010534Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.255022507Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254884135Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=production, node=ip-10-20-42-149.us-west-2.compute.internal" t=2024-05-29T13:44:14.255000353Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25486067Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254802044Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szgtoolp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254957312Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254743747Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szgtoolp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254905091Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254699821Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szgtoolp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254892841Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254680642Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254640892Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=production, node=ip-10-20-31-205.us-west-2.compute.internal" t=2024-05-29T13:44:14.254884629Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szckw0mh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254835Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254598304Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-szckw0mh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254743129Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=production, node=ip-10-20-25-103.us-west-2.compute.internal" t=2024-05-29T13:44:14.254809792Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev71.gradle.org, instance=dev71.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.254802058Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25455066Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254540507Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254511246Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254489276Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254481197Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254468469Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254459531Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254417734Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254372906Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sz8flz5p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254599318Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sz8flz5p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254589108Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sz8flz5p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254560947Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254316009Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sz8flz5p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254539327Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254254562Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev69.gradle.org, instance=dev69.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.254578884Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-56-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.254528143Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.254473421Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.835331ms
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev68.gradle.org, instance=dev68.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.254497398Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254203456Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sz7f1hsr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254457506Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254168571Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.254098842Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sz4paa4a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254267714Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253984733Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253959109Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-55-47.us-west-2.compute.internal" t=2024-05-29T13:44:14.254313913Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.254225929Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25394086Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev66.gradle.org, instance=dev66.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.254265243Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-54-24.us-west-2.compute.internal" t=2024-05-29T13:44:14.254213832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25389367Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sz4paa4a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254196584Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253875912Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syyrgjfb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.254100013Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253835903Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253830808Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253807715Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=228733 slug=csmoney t=2024-05-29T13:44:14.253958252Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253771013Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syyrgjfb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253941661Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25376103Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-50-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.253933287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-47-148.us-west-2.compute.internal" t=2024-05-29T13:44:14.25386548Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253753196Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syypu2mr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25387101Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev64.gradle.org, instance=dev64.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.253887161Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25371395Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syypu2mr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2538073Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253667539Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syypu2mr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253754369Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253626688Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syq20cdr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253714229Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253619033Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev63.gradle.org, instance=dev63.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.253722108Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253597501Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.253615261Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253497326Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syq20cdr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253609998Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syq20cdr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253572567Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.253609692Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253446571Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syq20cdr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253544187Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253331845Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-42-38.us-west-2.compute.internal" t=2024-05-29T13:44:14.253524266Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25328575Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25324233Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.253285116Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253219423Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-37-86.us-west-2.compute.internal" t=2024-05-29T13:44:14.253336727Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25313919Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sylcjfq3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253287214Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25311265Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev39.gradle.org, instance=dev39.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.253224173Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25307756Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253071015Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.253101673Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev39.gradle.org, instance=dev39.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.253211227Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253036826Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25302687Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.253009584Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252989137Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev38.gradle.org, instance=dev38.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.253084808Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syaoyuac-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.253063092Z level=debug msg="Keeping state" state=Normal
logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev38.gradle.org, instance=dev38.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.253073246Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252904498Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252891745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252878763Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-33-218.us-west-2.compute.internal" t=2024-05-29T13:44:14.252995824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252860268Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-syaoyuac-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252921381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252810723Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252803805Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252762263Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sy840k2z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252811769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25275246Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252731895Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy 
instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252725428Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252703318Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252693133Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252684314Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252679351Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252674263Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-29-173.us-west-2.compute.internal" t=2024-05-29T13:44:14.252712186Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sy3rizqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252665228Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25265143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.25264644Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252641862Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252619825Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252612934Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252608324Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sy3rizqr-termination-metadata-pv, 
phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252603557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sy3rizqr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252576287Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252576911Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252572306Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.252521971Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252554079Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev34.gradle.org, instance=dev34.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.252556825Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sy3rizqr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252522527Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252541631Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252529808Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252521295Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252516323Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.252501964Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=354676 slug=gridmarketenergy instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:14.252472403Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sy3qshjn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252490876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sy3qshjn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.252426536Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev33.gradle.org, instance=dev33.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.252454757Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev33.gradle.org, instance=dev33.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.252441194Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.252378348Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=354676 slug=gridmarketenergy t=2024-05-29T13:44:14.252329233Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev32.gradle.org, instance=dev32.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.252319613Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=354676 slug=gridmarketenergy version=13 fingerprint=4fcaac1cbd354238 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.252176039Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.251873385s EvaluationString:}]" duration=63.531365ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=office, node=ip-10-0-20-136.us-west-2.compute.internal" t=2024-05-29T13:44:14.252198944Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=762357 slug=dahikod t=2024-05-29T13:44:14.252026146Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxueypip-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251976801Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=janeway, node=ip-10-50-56-180.us-west-2.compute.internal" t=2024-05-29T13:44:14.251918118Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=janeway, node=ip-10-50-52-105.us-west-2.compute.internal" t=2024-05-29T13:44:14.251679574Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxrk673p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251569957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev307.gradle.org, instance=dev307.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.251659711Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxpkwge5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251508616Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.251583917Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev306.gradle.org, instance=dev306.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.251515738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxpkwge5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251383875Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=janeway, node=ip-10-50-35-27.us-west-2.compute.internal" t=2024-05-29T13:44:14.25143117Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxnejhtt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.251269094Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.251083692Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=janeway, node=ip-10-50-31-69.us-west-2.compute.internal" t=2024-05-29T13:44:14.251281184Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.251190474Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxnejhtt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251210693Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxlozxpz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251165813Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxlozxpz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251141492Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=janeway, node=ip-10-50-28-226.us-west-2.compute.internal" t=2024-05-29T13:44:14.251140551Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/root, env=janeway, node=ip-10-50-28-226.us-west-2.compute.internal" t=2024-05-29T13:44:14.251126523Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev303.gradle.org, instance=dev303.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.251097657Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxlozxpz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251079942Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev303.gradle.org, instance=dev303.gradle.org, 
job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.251083769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxlozxpz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.251023401Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.250991411Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxj1fi14-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250980701Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxj1fi14-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25095217Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev302.gradle.org, instance=dev302.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.25096247Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxj1fi14-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.25090503Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme2n1, env=sandbox, node=ip-10-40-20-158.us-west-2.compute.internal" t=2024-05-29T13:44:14.25093604Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=3c3074c1bb3eb27a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.250689062Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=Ejs1P5xVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.250316001s EvaluationString:}]" duration=21.98958ms + logger=ngalert.state.manager.persist user=537072 slug=devbitvavo t=2024-05-29T13:44:14.250820157Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.688929ms + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, 
host=dev301.gradle.org, instance=dev301.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.250862033Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme2n1, env=production, node=ip-10-20-58-131.us-west-2.compute.internal" t=2024-05-29T13:44:14.250848201Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme2n1, env=production, node=ip-10-20-58-131.us-west-2.compute.internal" t=2024-05-29T13:44:14.250836999Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.250707636Z caller=remote_instance_store.go:51 user=334644 slug=meiro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme2n1, env=office, node=ip-10-0-33-218.us-west-2.compute.internal" t=2024-05-29T13:44:14.250752049Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev300.gradle.org, instance=dev300.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.250771914Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxbhwvvd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250678458Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme2n1, env=janeway, node=ip-10-50-34-72.us-west-2.compute.internal" t=2024-05-29T13:44:14.250679161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxbhwvvd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250643027Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme2n1, env=janeway, node=ip-10-50-34-72.us-west-2.compute.internal" t=2024-05-29T13:44:14.250672103Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sxbhwvvd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250617037Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.250602352Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye 
instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-63-200.us-west-2.compute.internal" t=2024-05-29T13:44:14.250604481Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.250572097Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.25057239Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sx1jhvbx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250507456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sx1jhvbx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250432205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=148654 slug=tinybeans version=3 fingerprint=67493dfd8ecdd76d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.25039634Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.250121479s EvaluationString:}]" duration=48.509986ms + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.250323184Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-55-108.us-west-2.compute.internal" t=2024-05-29T13:44:14.250328145Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.250240959Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev222.gradle.org, instance=dev222.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.250209858Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-43-165.us-west-2.compute.internal" t=2024-05-29T13:44:14.250165022Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sx16ulcs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250159212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sx16ulcs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.250137922Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev221.gradle.org, instance=dev221.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.250090665Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.249990952Z caller=remote_instance_store.go:51 user=514639 slug=karatech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swymt5x6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2499722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-34-18.us-west-2.compute.internal" t=2024-05-29T13:44:14.249916548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-34-18.us-west-2.compute.internal" t=2024-05-29T13:44:14.249905091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swui0dk5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249876219Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev219.gradle.org, instance=dev219.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.249845506Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swt9lisc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249671587Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev218.gradle.org, instance=dev218.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.249714706Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=340750 slug=aptoslabs t=2024-05-29T13:44:14.249710859Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-22-194.us-west-2.compute.internal" t=2024-05-29T13:44:14.249703301Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swt9lisc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249638597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-20-158.us-west-2.compute.internal" t=2024-05-29T13:44:14.249590496Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev217.gradle.org, instance=dev217.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.249567505Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swt9lisc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249515826Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swt9lisc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249492065Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.24948757Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swsvqurw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249429205Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-20-137.us-west-2.compute.internal" t=2024-05-29T13:44:14.249498356Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, 
node=ip-10-40-20-137.us-west-2.compute.internal" t=2024-05-29T13:44:14.249484233Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.249408789Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=sandbox, node=ip-10-40-16-252.us-west-2.compute.internal" t=2024-05-29T13:44:14.249394126Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev215.gradle.org, instance=dev215.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.249310122Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=production, node=ip-10-20-58-131.us-west-2.compute.internal" t=2024-05-29T13:44:14.2492875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev215.gradle.org, instance=dev215.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.24929634Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swp0woz5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249253663Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=production, node=ip-10-20-58-131.us-west-2.compute.internal" t=2024-05-29T13:44:14.249273436Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swp0woz5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249211963Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.249203194Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swp0woz5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249188852Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev214.gradle.org, instance=dev214.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.249172132Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=production, node=ip-10-20-52-228.us-west-2.compute.internal" t=2024-05-29T13:44:14.249175875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swjmv7gf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.249105211Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.249078478Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=production, node=ip-10-20-42-149.us-west-2.compute.internal" t=2024-05-29T13:44:14.24910194Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=production, node=ip-10-20-42-149.us-west-2.compute.internal" t=2024-05-29T13:44:14.249078932Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev213.gradle.org, instance=dev213.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.249048787Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.249051157Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=production, node=ip-10-20-31-205.us-west-2.compute.internal" t=2024-05-29T13:44:14.249011413Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swjmv7gf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.24896166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swb5slmk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248897619Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=production, node=ip-10-20-25-103.us-west-2.compute.internal" t=2024-05-29T13:44:14.24892568Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev212.gradle.org, instance=dev212.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.248890153Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.248853373Z 
caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev212.gradle.org, instance=dev212.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.248877837Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-swb5slmk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248749608Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-61-218.us-west-2.compute.internal" t=2024-05-29T13:44:14.248759063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev210.gradle.org, instance=dev210.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.24871269Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-57-248.us-west-2.compute.internal" t=2024-05-29T13:44:14.24868573Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.248666026Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sw9mpwr2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248625037Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-56-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.248606756Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sw7migzn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248419164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-56-190.us-west-2.compute.internal" t=2024-05-29T13:44:14.248508745Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-55-47.us-west-2.compute.internal" t=2024-05-29T13:44:14.248422213Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:14.248399942Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev208.gradle.org, instance=dev208.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.248397633Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=548157 slug=kushkiprod instance="datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A" t=2024-05-29T13:44:14.248384752Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z
+ logger=ngalert.state.manager user=548157 slug=kushkiprod instance="datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A" t=2024-05-29T13:44:14.248367632Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sw7migzn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248377664Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sw7migzn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248347614Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-54-24.us-west-2.compute.internal" t=2024-05-29T13:44:14.248307257Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sw0j3n9x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248271873Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev207.gradle.org, instance=dev207.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.248277027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev207.gradle.org, instance=dev207.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.248260777Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.248220135Z caller=remote_alert_sender.go:94 user=767797 slug=mgmresorts host=mgmresorts-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.71.101:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=admlyo1xo0glca alerts=1
+ logger=ngalert.scheduler user=548157 slug=kushkiprod version=3 fingerprint=0ebe8e56879f1076 attempt=1 now=2024-05-29T13:44:00Z t=2024-05-29T13:44:14.24816675Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:00 +0000 UTC EvaluationDuration:14.247684065s EvaluationString:}]" duration=4.257035983s
+ level=debug ts=2024-05-29T13:44:14.248066324Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-52-184.us-west-2.compute.internal" t=2024-05-29T13:44:14.248148572Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev206.gradle.org, instance=dev206.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.248087832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svwx4ucz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.248034791Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svwx4ucz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.24798081Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.247741713Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svq2ug8n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.247681477Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.247677082Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-39-32.us-west-2.compute.internal" t=2024-05-29T13:44:14.247658959Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.247646525Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev203.gradle.org, instance=dev203.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.247559382Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev202.gradle.org, instance=dev202.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.247448851Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svpduvt0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.247367574Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-36-154.us-west-2.compute.internal" t=2024-05-29T13:44:14.247425686Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-36-154.us-west-2.compute.internal" t=2024-05-29T13:44:14.247399244Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svkj1np2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.247227102Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev201.gradle.org, instance=dev201.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.247295607Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev201.gradle.org, instance=dev201.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.247282881Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.247211936Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-33-218.us-west-2.compute.internal" t=2024-05-29T13:44:14.247161476Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-33-218.us-west-2.compute.internal" t=2024-05-29T13:44:14.247140298Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev200.gradle.org, instance=dev200.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.247099248Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svjdgjrg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.246959629Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svjdgjrg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.246873979Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:14.24687736Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.433707ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svjdgjrg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.246738527Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svif055q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.246677287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-27-217.us-west-2.compute.internal" t=2024-05-29T13:44:14.246763069Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev175.gradle.org, instance=dev175.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.246732478Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svif055q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.246633566Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.246617037Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-25-205.us-west-2.compute.internal" t=2024-05-29T13:44:14.246578191Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev174.gradle.org, instance=dev174.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.246584916Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev173.gradle.org, instance=dev173.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.246495597Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svbrk0ly-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.246473295Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.246450018Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-24-233.us-west-2.compute.internal" t=2024-05-29T13:44:14.246489614Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-24-233.us-west-2.compute.internal" t=2024-05-29T13:44:14.24647234Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.246225322Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.246362567Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-svbrk0ly-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.246357203Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=office, node=ip-10-0-18-38.us-west-2.compute.internal" t=2024-05-29T13:44:14.246272847Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.246256206Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev171.gradle.org, instance=dev171.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.246208559Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.246105623Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.24610471Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=janeway, node=ip-10-50-56-180.us-west-2.compute.internal" t=2024-05-29T13:44:14.246078703Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev170.gradle.org, instance=dev170.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.246060678Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.246072196Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.24604254Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sv5la9k6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.24600048Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.246054943Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.2460054Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.245980276Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.245971842Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev169.gradle.org, instance=dev169.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.245955476Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=janeway, node=ip-10-50-52-105.us-west-2.compute.internal" t=2024-05-29T13:44:14.245913185Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.245875256Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.245868048Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=janeway, node=ip-10-50-52-105.us-west-2.compute.internal" t=2024-05-29T13:44:14.245901257Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev168.gradle.org, instance=dev168.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.245851386Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.245696534Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=lxcfs, fstype=fuse.lxcfs, host=dev167.gradle.org, instance=dev167.gradle.org, job=node, mountpoint=/var/lib/lxcfs, os=Ubuntu" t=2024-05-29T13:44:14.245713698Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suvlp3ej-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.245573155Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=janeway, node=ip-10-50-35-27.us-west-2.compute.internal" t=2024-05-29T13:44:14.245611055Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suvlp3ej-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.245513325Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suvb4mzj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.245414714Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PPL-INTEGRATION-EVENTS-SQS" t=2024-05-29T13:44:14.245460402Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suvb4mzj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.245342083Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=janeway, node=ip-10-50-31-69.us-west-2.compute.internal" t=2024-05-29T13:44:14.245365821Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/sdb, fstype=ext4, host=repo-blue.grdev.net, instance=repo-blue.grdev.net, job=node, mountpoint=/mnt/data, os=Ubuntu" t=2024-05-29T13:44:14.245305981Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=janeway, node=ip-10-50-28-84.us-west-2.compute.internal" t=2024-05-29T13:44:14.245299168Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme1n1p1, env=janeway, node=ip-10-50-28-84.us-west-2.compute.internal" t=2024-05-29T13:44:14.245284458Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suu91t2o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.245180201Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suu91t2o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.245153261Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/sdb, fstype=ext4, host=eu-build-cache.gradle.org, instance=eu-build-cache.gradle.org, job=node, mountpoint=/opt/gradle/build-cache-node, os=Ubuntu" t=2024-05-29T13:44:14.245116239Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=sandbox, node=ip-10-40-63-200.us-west-2.compute.internal" t=2024-05-29T13:44:14.245033944Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-surneq5j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244964159Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/sda15, fstype=vfat, host=salt-master-blue.grdev.net, instance=salt-master-blue.grdev.net, job=node, mountpoint=/boot/efi, os=Ubuntu" t=2024-05-29T13:44:14.244966968Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-surneq5j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244902138Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-surlmv38-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244839168Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-surlmv38-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244690736Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:14.244804539Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.210672ms
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=sandbox, node=ip-10-40-56-87.us-west-2.compute.internal" t=2024-05-29T13:44:14.244821317Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.244775727Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/sda15, fstype=vfat, host=internal-services.grdev.net, instance=internal-services.grdev.net, job=node, mountpoint=/boot/efi, os=Ubuntu" t=2024-05-29T13:44:14.244684757Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/sda15, fstype=vfat, host=internal-services.grdev.net, instance=internal-services.grdev.net, job=node, mountpoint=/boot/efi, os=Ubuntu" t=2024-05-29T13:44:14.24467044Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suowc5pi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244642056Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=a5356e5908817f3a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.244528501Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000002, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.244281652s EvaluationString:}]" duration=45.704472ms
+ level=debug ts=2024-05-29T13:44:14.244533792Z caller=remote_instance_store.go:51 user=517596 slug=datar msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/sda15, fstype=vfat, host=eu-build-cache.gradle.org, instance=eu-build-cache.gradle.org, job=node, mountpoint=/boot/efi, os=Ubuntu" t=2024-05-29T13:44:14.24451997Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.24445461Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.244415995Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sun3gn41-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244416703Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:14.244344948Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.244283171Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sun3gn41-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244310822Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suhclhci-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244255062Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.244293642Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdbhsq1zf0gsid alerts=1
+ level=debug ts=2024-05-29T13:44:14.244138348Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suhclhci-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.244228421Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=sandbox, node=ip-10-40-34-18.us-west-2.compute.internal" t=2024-05-29T13:44:14.244214223Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-suhclhci-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.24412026Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sufg30xq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243998659Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sufg30xq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243928168Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.243912132Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sufg30xq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243870098Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/sda1, fstype=ext4, host=eu-build-cache.gradle.org, instance=eu-build-cache.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.243893018Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.243181947Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners/groups/{groupId}/details, Stage=--" t=2024-05-29T13:44:14.243762379Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners/groups/{groupId}/details, Stage=--" t=2024-05-29T13:44:14.243739961Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners/groups/{groupId}/details, Stage=--" t=2024-05-29T13:44:14.243731241Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners/groups/{groupId}/details, Stage=--" t=2024-05-29T13:44:14.243719342Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.24369014Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=teamcity-server.grdev.net, instance=teamcity-server.grdev.net, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.243657675Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=production, node=ip-10-20-52-228.us-west-2.compute.internal" t=2024-05-29T13:44:14.243631204Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=teamcity-server.grdev.net, instance=teamcity-server.grdev.net, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.243643454Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su8pvibb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243566115Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su8pvibb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243519894Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.243553171Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=production, node=ip-10-20-42-149.us-west-2.compute.internal" t=2024-05-29T13:44:14.243561943Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.24345559Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.243157717Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su7zb4yw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243357623Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev96.gradle.org, instance=dev96.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.243381453Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev96.gradle.org, instance=dev96.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.243368383Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.243363961Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=production, node=ip-10-20-25-103.us-west-2.compute.internal" t=2024-05-29T13:44:14.243378386Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.243324432Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.243305215Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=production, node=ip-10-20-22-168.us-west-2.compute.internal" t=2024-05-29T13:44:14.243304945Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=production, node=ip-10-20-22-168.us-west-2.compute.internal" t=2024-05-29T13:44:14.243292539Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev95.gradle.org, instance=dev95.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.243270453Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.243262839Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su7zb4yw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243233721Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su7zb4yw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.243203661Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.243121856Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-57-248.us-west-2.compute.internal" t=2024-05-29T13:44:14.243149339Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-57-248.us-west-2.compute.internal" t=2024-05-29T13:44:14.243136882Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev94.gradle.org, instance=dev94.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.243115853Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su4f9pov-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.242909468Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-55-47.us-west-2.compute.internal" t=2024-05-29T13:44:14.242915986Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-55-47.us-west-2.compute.internal" t=2024-05-29T13:44:14.242904322Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=274199 slug=telemetriahgm t=2024-05-29T13:44:14.242871211Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su4f9pov-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.242845407Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.242794235Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-52-198.us-west-2.compute.internal" t=2024-05-29T13:44:14.24276337Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=273717 slug=seventyfivef t=2024-05-29T13:44:14.242757966Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-52-198.us-west-2.compute.internal" t=2024-05-29T13:44:14.242752716Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev91.gradle.org, instance=dev91.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.242744861Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-su1ysoaw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.242640055Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.242629867Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stzmymvo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.242439743Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-47-126.us-west-2.compute.internal" t=2024-05-29T13:44:14.242373379Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-47-126.us-west-2.compute.internal" t=2024-05-29T13:44:14.242358311Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev87.gradle.org, instance=dev87.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.242408566Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.242223681Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev86.gradle.org, instance=dev86.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.242269026Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.242141488Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-46-9.us-west-2.compute.internal" t=2024-05-29T13:44:14.24223198Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.24212411Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stzi1ybs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.24208454Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stzi1ybs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.242037549Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stzi1ybs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.241992999Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-37-86.us-west-2.compute.internal" t=2024-05-29T13:44:14.241960941Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241879937Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241872967Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241864647Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241820087Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.241786421Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241799257Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241790817Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-36-154.us-west-2.compute.internal" t=2024-05-29T13:44:14.241844365Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241779096Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.241632496Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.241713067Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.241592716Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241661055Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=874970 slug=nvidia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.241585824Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.241322886Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stytk7be-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.241311192Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev78.gradle.org, instance=dev78.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.241292753Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev78.gradle.org, instance=dev78.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.241275963Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stv44j46-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.24111114Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-27-217.us-west-2.compute.internal" t=2024-05-29T13:44:14.241233063Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev77.gradle.org, instance=dev77.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.241083744Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev76.gradle.org, instance=dev76.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.240969584Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.240997419Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-strqdkxa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240935828Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-strqdkxa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240914688Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:14.240834377Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-20-136.us-west-2.compute.internal" t=2024-05-29T13:44:14.240881715Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-strqdkxa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240885457Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.240812912Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=office, node=ip-10-0-20-136.us-west-2.compute.internal" t=2024-05-29T13:44:14.240870655Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stoqfqlc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240780196Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stoqfqlc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240758486Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.240714055Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=374423 slug=bitburst version=73 fingerprint=53d36fe52b81664b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.240648655Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.240335857s EvaluationString:}]" duration=10.172974ms
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev73.gradle.org, instance=dev73.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.240709689Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=janeway, node=ip-10-50-56-180.us-west-2.compute.internal" t=2024-05-29T13:44:14.240603291Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=janeway, node=ip-10-50-56-180.us-west-2.compute.internal" t=2024-05-29T13:44:14.240592654Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev72.gradle.org, instance=dev72.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.240575556Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=174016 slug=journalstaging t=2024-05-29T13:44:14.240548431Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.516508ms
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.unipharma.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.unipharma.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240567028Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.unipharma.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.unipharma.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240553447Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.telcowise.meiro.partners, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.telcowise.meiro.partners;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240476858Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.telcowise.meiro.partners, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.telcowise.meiro.partners;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240465983Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stg5wooe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240344572Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.store.demo.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.store.demo.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.2403854Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stg5wooe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240254441Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev70.gradle.org, instance=dev70.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.240305785Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev70.gradle.org, instance=dev70.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.240292691Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-staging-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2401814Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.iprima.meiro.app, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.iprima.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240193045Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.internal.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.internal.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240134651Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-staging-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.240043079Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.heureka.meiro.app, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.heureka.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240110418Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.hemodata.meiro.partners, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.hemodata.meiro.partners;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.240056839Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.23998968Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=janeway, node=ip-10-50-34-72.us-west-2.compute.internal" t=2024-05-29T13:44:14.240010394Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=janeway, node=ip-10-50-34-72.us-west-2.compute.internal" t=2024-05-29T13:44:14.239997502Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-stable-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.239691315Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev67.gradle.org, instance=dev67.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239935847Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.239836993Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.239826813Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-st6np9kl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.239630044Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-st6np9kl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.239560704Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssuavbyb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.239303741Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.239823186Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.etstur.com, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.etstur.com;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239852697Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev66.gradle.org, instance=dev66.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239849205Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=805202 slug=columbia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.239335931Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=538037 slug=drivewealth version=46 fingerprint=11d3449cb2d7e83c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.239717865Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.239347845s EvaluationString:}]" duration=2.609915941s
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.epace.meiro.partners, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.epace.meiro.partners;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.23981137Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.eon.meiro.app, movingAverage=10,
name=movingAverage(customer_events_loaded;app=cdp;client=cdp.eon.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239780431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssuavbyb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.23922929Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev65.gradle.org, instance=dev65.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239766892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.emtek.digital, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.emtek.digital;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239736229Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.239386922Z caller=remote_instance_store.go:51 user=805202 slug=columbia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev64.gradle.org, instance=dev64.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239698525Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=janeway, node=ip-10-50-28-226.us-west-2.compute.internal" t=2024-05-29T13:44:14.239672861Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.dtro.meiro.app, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.dtro.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239658287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev63.gradle.org, instance=dev63.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239612505Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=janeway, node=ip-10-50-16-64.us-west-2.compute.internal" t=2024-05-29T13:44:14.239587144Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p3, env=janeway, node=ip-10-50-16-64.us-west-2.compute.internal" t=2024-05-29T13:44:14.23957538Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.239513434Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev62.gradle.org, instance=dev62.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239500868Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, 
env=sandbox, node=ip-10-40-63-200.us-west-2.compute.internal" t=2024-05-29T13:44:14.239471224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.drmaxit.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.drmaxit.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239473157Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=sandbox, node=ip-10-40-60-73.us-west-2.compute.internal" t=2024-05-29T13:44:14.23937909Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.dihagr.meiro.app, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.dihagr.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239325516Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=sandbox, node=ip-10-40-56-87.us-west-2.compute.internal" t=2024-05-29T13:44:14.239320428Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.239288792Z caller=remote_instance_store.go:51 user=537072 slug=devbitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.di28bsw.meiro.app, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.di28bsw.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239288614Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.devvy.meiro.app, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.devvy.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239261298Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev60.gradle.org, instance=dev60.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.23924073Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.devvy.meiro.app, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.devvy.meiro.app;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239246077Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.dataclub.meiro.partners, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.dataclub.meiro.partners;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239219051Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev60.gradle.org, instance=dev60.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239231648Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=537072 slug=devbitvavo t=2024-05-29T13:44:14.239125869Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=334644 
slug=meiro instance="app=cdp, client=cdp.cnc.meiro.eu, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.cnc.meiro.eu;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239164083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.chemistnz.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.chemistnz.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239110816Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssre3aa0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.239034818Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.chemist.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.chemist.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.239072746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssre3aa0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.239007748Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev58.gradle.org, instance=dev58.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.239116635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=129076 slug=marginalunit t=2024-05-29T13:44:14.239021669Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.454565ms + logger=ngalert.state.manager user=334644 slug=meiro instance="app=cdp, client=cdp.banking.demo.meiro.io, movingAverage=10, name=movingAverage(customer_events_loaded;app=cdp;client=cdp.banking.demo.meiro.io;service=jobs_scheduler,10) A, service=jobs_scheduler" t=2024-05-29T13:44:14.238960616Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssp1u9fp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238930877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=sandbox, node=ip-10-40-43-165.us-west-2.compute.internal" t=2024-05-29T13:44:14.239068837Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.239036468Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssmitqkq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238682135Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssmitqkq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238637204Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssmitqkq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238621484Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssl118xn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238465412Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssjf10pa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238414492Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.238754643Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=sandbox, node=ip-10-40-34-18.us-west-2.compute.internal" t=2024-05-29T13:44:14.238775865Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.238619447Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev38.gradle.org, instance=dev38.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.238623572Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 
slug=bizagi t=2024-05-29T13:44:14.238585669Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.238444364Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.974824ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=sandbox, node=ip-10-40-20-158.us-west-2.compute.internal" t=2024-05-29T13:44:14.238489908Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.238397492Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.23842989Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev36.gradle.org, instance=dev36.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.238390743Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=sandbox, node=ip-10-40-20-137.us-west-2.compute.internal" t=2024-05-29T13:44:14.238376578Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.238252117Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev35.gradle.org, instance=dev35.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.238247739Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=sandbox, node=ip-10-40-16-252.us-west-2.compute.internal" t=2024-05-29T13:44:14.238287228Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssiqkre6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238186599Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=production, node=ip-10-20-58-131.us-west-2.compute.internal" t=2024-05-29T13:44:14.238184763Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ssiqkre6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.238039908Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ss8em0il-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237906977Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss8em0il-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237885266Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.237866524Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss7y5yg3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237778095Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=production, node=ip-10-20-25-103.us-west-2.compute.internal" t=2024-05-29T13:44:14.237818924Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss7y5yg3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237712735Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.237765954Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev310.gradle.org, instance=dev310.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.237710369Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev309.gradle.org, instance=dev309.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.237602779Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss48oxzr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237546233Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, 
node=ip-10-0-56-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.237519466Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss3iq62x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237491712Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss3iq62x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237441372Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss3iq62x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237412241Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-56-190.us-west-2.compute.internal" t=2024-05-29T13:44:14.237413029Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ss0y0lqv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237361241Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.23728662Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-55-47.us-west-2.compute.internal" t=2024-05-29T13:44:14.237338546Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.237211353Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev306.gradle.org, instance=dev306.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.237233919Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-srxvo1sl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237204729Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srxvo1sl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237182259Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.237136209Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srxvo1sl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237124898Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srxvo1sl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.237080178Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-50-29.us-west-2.compute.internal" t=2024-05-29T13:44:14.237078022Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=155740 slug=routific t=2024-05-29T13:44:14.236977739Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:14.236960834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srsxaa4k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236915896Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.236902148Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:14.236949861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-47-148.us-west-2.compute.internal" 
t=2024-05-29T13:44:14.236943052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.236900501Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=155740 slug=routific version=3 fingerprint=04af54cda932906c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.236866235Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.236584944s EvaluationString:}]" duration=22.514691ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srsxaa4k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236893306Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.236840537Z caller=remote_instance_store.go:51 user=686395 slug=containerfoundation msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=806229 slug=simplisafe version=267 fingerprint=42941383ac3a3ce6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.23675072Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc0728ccd38} C:{Var:C Labels: Value:0xc0728ccd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.236244633s EvaluationString:[ var='B' labels={} value=525.4968279334389 ], [ var='C' labels={} value=0 ]}]" duration=55.407473ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-47-126.us-west-2.compute.internal" t=2024-05-29T13:44:14.236872399Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev303.gradle.org, instance=dev303.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.236867687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev303.gradle.org, instance=dev303.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.236855104Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-47-126.us-west-2.compute.internal" t=2024-05-29T13:44:14.236862448Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-46-9.us-west-2.compute.internal" t=2024-05-29T13:44:14.236792744Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srpf77or-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236759405Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srpf77or-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236735674Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev302.gradle.org, instance=dev302.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.236741445Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srjs55re-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236653384Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-39-32.us-west-2.compute.internal" t=2024-05-29T13:44:14.236610096Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.236467346Z caller=remote_instance_store.go:51 user=846513 slug=npc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev301.gradle.org, instance=dev301.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.2365811Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.23652007Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.236503799Z caller=remote_instance_store.go:51 user=514639 slug=karatech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-srhn6fmd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236540672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev300.gradle.org, instance=dev300.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.236464875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev300.gradle.org, instance=dev300.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.236456797Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-srghnsig-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.23626872Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr9nw2z9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236189959Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr9nw2z9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236168019Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr3sywht-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236088248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-35-214.us-west-2.compute.internal" t=2024-05-29T13:44:14.236141007Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr3sywht-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236064467Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.235989033Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr3sywht-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.236036557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:14.235963643Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.053133ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, 
node=ip-10-0-31-141.us-west-2.compute.internal" t=2024-05-29T13:44:14.235948681Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr3kmlp8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235880236Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.235801564Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr3kmlp8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235856695Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sr3kmlp8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235805555Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.235768453Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.235807804Z caller=remote_instance_store.go:51 user=75789 slug=mysign msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.235716328Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.235650017Z caller=remote_instance_store.go:51 user=532553 slug=jithins msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=75789 slug=mysign t=2024-05-29T13:44:14.235734942Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.23558416Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.235590331Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqratctu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235654663Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev271.gradle.org, instance=dev271.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.235648016Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-29-173.us-west-2.compute.internal" t=2024-05-29T13:44:14.235636247Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.23561603Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-27-217.us-west-2.compute.internal" t=2024-05-29T13:44:14.23556456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-27-217.us-west-2.compute.internal" t=2024-05-29T13:44:14.235551353Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=0fbe6709a42986c2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.235487034Z level=debug msg="Alert rule evaluated" results="[{Instance:FunctionName=svix-statements-consumer-live State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:FunctionName=svix-statements-consumer-live Value:0xc02eb7d430} C:{Var:C Labels:FunctionName=svix-statements-consumer-live Value:0xc02eb7d438}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.235171639s EvaluationString:[ var='B' labels={FunctionName=svix-statements-consumer-live} value=0 ], [ var='C' labels={FunctionName=svix-statements-consumer-live} value=0 ]}]" duration=54.590575ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqpkt4m0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235516052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqpkt4m0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235506842Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqjyyrxn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.23536374Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqj0sqd2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.23533408Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqj0sqd2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.23532433Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqj0sqd2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235294479Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqj0sqd2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235235909Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-24-233.us-west-2.compute.internal" t=2024-05-29T13:44:14.235277973Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-24-233.us-west-2.compute.internal" t=2024-05-29T13:44:14.235263816Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev268.gradle.org, instance=dev268.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.2352587Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.235011063Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.235139846Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev267.gradle.org, instance=dev267.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.235102274Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev267.gradle.org, instance=dev267.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.235089692Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sqgjvk5p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.235034337Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=office, node=ip-10-0-18-38.us-west-2.compute.internal" t=2024-05-29T13:44:14.235069027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev266.gradle.org, instance=dev266.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.234964907Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.234885542Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq7tmzy3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234788174Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.234797197Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:14.234797604Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=49.909855ms
+ logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.234808783Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.234801774Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev265.gradle.org, instance=dev265.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.234823945Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq7tmzy3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234716824Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.234777254Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq7tmzy3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234685863Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=371756 slug=asapp version=45 fingerprint=15ad847712b12690 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.234686454Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.234417425s EvaluationString:}]" duration=35.060344ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq4abarz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234622043Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.234608459Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq4abarz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234594722Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq4abarz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234547352Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq4abarz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234514091Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq4abarz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234487391Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq0vqjwu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234445571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq0vqjwu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.23437531Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq0vqjwu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234253999Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=janeway, node=ip-10-50-34-72.us-west-2.compute.internal" t=2024-05-29T13:44:14.234285452Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=janeway, node=ip-10-50-34-72.us-west-2.compute.internal" t=2024-05-29T13:44:14.234270891Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sq0vqjwu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234217728Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sptns3eh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234138748Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.234078377Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sptns3eh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.234099547Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=janeway, node=ip-10-50-28-84.us-west-2.compute.internal" t=2024-05-29T13:44:14.234056321Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.234079108Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.234082555Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spsh700i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233859155Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.23395954Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.233994329Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=janeway, node=ip-10-50-28-226.us-west-2.compute.internal" t=2024-05-29T13:44:14.233971122Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=224047 slug=ppbtradingtribeprd t=2024-05-29T13:44:14.233918506Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="device=/dev/nvme0n1p12, env=janeway, node=ip-10-50-28-226.us-west-2.compute.internal" t=2024-05-29T13:44:14.233957534Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.233905046Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.233903212Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye t=2024-05-29T13:44:14.233577102Z level=debug msg="State manager processing evaluation results" resultCount=364
+ level=debug ts=2024-05-29T13:44:14.233806694Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.233892212Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.233901043Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd t=2024-05-29T13:44:14.233842673Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spsh700i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233774624Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spqhavmv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233701143Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spqhavmv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233647023Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.23366821Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.233559814Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.233483135Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.23336004Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.23340873Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.233400783Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spokoawi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233344589Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.23331999Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spm12oc0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233210178Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spm12oc0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233188638Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.233085118Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spl4mhru-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.233011236Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spl4mhru-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232949985Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spkm6tl5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232804804Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spicm0v4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232752643Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-spicm0v4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232643862Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.232619568Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.232641691Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.23260388Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.232574488Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.232607704Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=523054 slug=vialtopartners version=212 fingerprint=69767ab6b6a28ccf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.232536435Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.2323055s EvaluationString:}]" duration=16.934598ms
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.232484967Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-soxn0y23-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232364399Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.232402251Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.232363672Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.232269239Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.232347586Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.232151683Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.232223404Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sopi3vsm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232138017Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=273717 slug=seventyfivef t=2024-05-29T13:44:14.232167953Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=83.764215ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sopi3vsm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232116667Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=447897 slug=mysten t=2024-05-29T13:44:14.2320824Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=38.8509ms
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.232074218Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sopi3vsm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.232027826Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-soczh392-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.231757293Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-so9dzgy7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.231638152Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.23167621Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-so3hirfp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.231550001Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-so3hirfp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.231527311Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.231553698Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-so3hirfp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.23148875Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.231271027Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-snl625i9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.231209828Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.231057341Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sniezm7b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.231108137Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sniezm7b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.231031756Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sniezm7b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230980935Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:14.230782174Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:14.230765724Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:14.230743514Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=767797 slug=mgmresorts t=2024-05-29T13:44:14.230645163Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.230723656Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-snec2hyf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230693252Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.230678451Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-snec2hyf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230600781Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.230532487Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.230398911Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.230417426Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.230298364Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sna4sqxs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230357299Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.23027863Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sn8vewci-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230237028Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sn8vewci-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230215227Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sn8vewci-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230155607Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sn8vewci-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.230036346Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.230126611Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.229969129Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.230015737Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sn4q1ljh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229957075Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=174016 slug=journalstaging instance="datasource_uid=bYQmLgyGz, ref_id=A" t=2024-05-29T13:44:14.229990809Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager.persist user=637816 slug=kingobservatory t=2024-05-29T13:44:14.229902503Z level=debug msg="Saving alert states done" count=26 max_state_save_concurrency=1 duration=239.375548ms
+ logger=ngalert.state.manager user=174016 slug=journalstaging t=2024-05-29T13:44:14.229954708Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.229738743Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.229903493Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sn3u44ly-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229788283Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=332019 slug=liberfly instance="agent_hostname=ip-172-31-93-110, device=/dev/root, fstype=ext4, instance=frontend, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:14.229858076Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sn3u44ly-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229715502Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev222.gradle.org, instance=dev222.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.229655054Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smvz17ry-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229340878Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smuu3pew-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229286778Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smuu3pew-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229261018Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smuu3pew-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229229487Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smuu3pew-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229220317Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smuu3pew-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229191257Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=396586 slug=opengov instance="app=ppf-backend, cluster=k8s-aks-dev-eus-97ad, container=ppf-backend, environment=Development, filename=/var/log/pods/ppf_ppf-backend-549d6d6c85-qsbn5_e804cee9-11d3-429c-abd8-4db4275a0e25/ppf-backend/0.log, job=ppf/ppf-backend-549d6d6c85-qsbn5, namespace=ppf, pod=ppf-backend-549d6d6c85-qsbn5, service_name=ppf-backend, stream=stdout" t=2024-05-29T13:44:14.229356406Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=174675 slug=journalprod t=2024-05-29T13:44:14.229298374Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=174675 slug=journalprod version=1 fingerprint=b981ffa95ac6d1fc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.229223Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.228947171s EvaluationString:}]" duration=44.329152ms
+ level=debug ts=2024-05-29T13:44:14.229084091Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.229027459Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smt1pp42-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.229100166Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev218.gradle.org, instance=dev218.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.229006984Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.228940389Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smsz1arc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228820093Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smsz1arc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228797983Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.228786233Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.228788544Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev216.gradle.org, instance=dev216.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.228712406Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev216.gradle.org, instance=dev216.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.228698243Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smqpedr4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228677702Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smqbgvhv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228615701Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:14.22849927Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=520904 slug=rrf
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smpqsm1i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228383129Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smpqsm1i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228324668Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smlatf0n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228294778Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smlatf0n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228223327Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.228254822Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smge1oeq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228091046Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smge1oeq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.228081935Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.22795227Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.227973811Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smegql29-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227913604Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smb90a2i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227873623Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev211.gradle.org, instance=dev211.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.227872415Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-smb90a2i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227849583Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev210.gradle.org, instance=dev210.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.227710681Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.227551642Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev21.gradle.org, instance=dev21.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.227580543Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm63halv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227481069Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm63halv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227454219Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev209.gradle.org, instance=dev209.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.22744105Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.227407457Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.227323457Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm63halv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227413019Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm63halv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227317448Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:14.227186857Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=314292 slug=smeuwest1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm61invn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227245437Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev208.gradle.org, instance=dev208.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.227265399Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm61invn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227182146Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm61invn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227144156Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=337951 slug=pawapay t=2024-05-29T13:44:14.227049206Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.344064ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm61invn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.227111496Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm4a8rjl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226929914Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.226887409Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm194oo0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226848663Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm194oo0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226747202Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.226838516Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.83.87:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f9364fd2-c023-42e2-9066-6cd6b1b8b412 alerts=1
+ level=debug ts=2024-05-29T13:44:14.226760489Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.226746386Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f9364fd2-c023-42e2-9066-6cd6b1b8b412 alerts=1
+ logger=ngalert.state.manager user=70430 slug=dapperlabs t=2024-05-29T13:44:14.226682537Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.226592671Z caller=remote_instance_store.go:51 user=846513 slug=npc msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=27eabd5816f7613e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.226601999Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.226363085s EvaluationString:}]" duration=24.929206ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm194oo0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226625711Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=320906 slug=techcyte t=2024-05-29T13:44:14.226581062Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.29875ms
+ level=debug ts=2024-05-29T13:44:14.226587182Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm0mn0ig-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226506879Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sm0mn0ig-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226484699Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.226405152Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.226360399Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slzyi72d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226294667Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev206.gradle.org, instance=dev206.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.226318784Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.226242983Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev206.gradle.org, instance=dev206.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.226310833Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slzyi72d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226236847Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slzyi72d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.226185726Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slvl8pmb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225982794Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=230713 slug=flocksafety instance= t=2024-05-29T13:44:14.225921676Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=230713 slug=flocksafety t=2024-05-29T13:44:14.22574225Z level=warn msg="Rule declares one or many reserved labels.
Those rules labels will be ignored" labels="alertname=fleet-management-jack-rx-rate" + logger=ngalert.state.manager.persist user=214309 slug=spenmo t=2024-05-29T13:44:14.225833115Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.287812ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sluuekxx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225805492Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev202.gradle.org, instance=dev202.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.225808474Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=342039 slug=criblcloud version=1 fingerprint=145a160df0148c85 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.225696936Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.225433803s EvaluationString:}]" duration=71.397717ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sluuekxx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225749312Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=230713 slug=flocksafety version=51 fingerprint=f2affd5fdf148398 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.225660634Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.225383821s EvaluationString:}]" duration=74.436763ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sluga4xo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225696381Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sluga4xo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225665561Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sluga4xo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.22559151Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.225514513Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev200.gradle.org, instance=dev200.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.225531634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slpdnpcx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225391718Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.225423722Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slpdnpcx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225330307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=415003 slug=salaryfinance t=2024-05-29T13:44:14.225401889Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=112387 slug=lucidhq instance="Region=eu-central-1, ServiceName=SES" t=2024-05-29T13:44:14.22534499Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.225330909Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev176.gradle.org, instance=dev176.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.225359547Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=112387 slug=lucidhq version=3 fingerprint=9f2b470a6ff9eabc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.225265766Z level=debug msg="Alert rule evaluated" results="[{Instance:Region=eu-central-1, ServiceName=SES State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:Region=eu-central-1, ServiceName=SES Value:} C:{Var:C Labels:Region=eu-central-1, ServiceName=SES Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.224974905s EvaluationString:[ var='B' labels={Region=eu-central-1, ServiceName=SES} value=null ], [ var='C' labels={Region=eu-central-1, ServiceName=SES} value=null ]}]" duration=160.275908ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sle02ftz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.225004694Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev174.gradle.org, instance=dev174.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.225041915Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev173.gradle.org, instance=dev173.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.224924616Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sle02ftz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224883923Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.224843089Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slbp59db-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224776632Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.22467365Z caller=remote_instance_store.go:51 user=172772 slug=ppbtradingtribe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.224677289Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-slbp59db-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224672691Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sl3dn9og-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.22462843Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:14.224614243Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + 
logger=ngalert.state.manager user=172772 slug=ppbtradingtribe instance="StateMachineArn=arn:aws:states:eu-west-1:448085598967:stateMachine:EventMappingStateMachine" t=2024-05-29T13:44:14.22459376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sl3dn9og-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224542379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sl071buw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224433478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skypl7lt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224348497Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.224331276Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.224291417Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skwygw5o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224210906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=350020 slug=artifact instance= t=2024-05-29T13:44:14.224196735Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.224192Z caller=remote_instance_store.go:51 user=517596 slug=datar msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skwygw5o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224165815Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.224168163Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + Error parsing panelUID for alert 
annotationruleID2629dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=350020 slug=artifact version=1 fingerprint=915d254f721da135 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.224020529Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.223606557s EvaluationString:}]" duration=26.060891ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skw1213d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224034274Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skw1213d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.224005764Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skvgb7cx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223890813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev167.gradle.org, instance=dev167.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.223897078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sklbqxe2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223719351Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sklbqxe2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223697511Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-sklbqxe2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.22366953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skhldxfv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2236085Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:14.223618555Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:14.22360092Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.223569823Z caller=client.go:80 msg="creating client for grafana instance" user=618306 addr=dns:///sqrl-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.223540877Z caller=remote_instance_store.go:51 user=776563 slug=eagleeye4els msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:14.223547523Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=490499 slug=servizigestitit4v + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skhldxfv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223555069Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skhldxfv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223526399Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.223464709Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=60199 slug=wallapop version=7 fingerprint=408ca5063898d984 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.223338416Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.222995442s EvaluationString:}]" duration=534.658552ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-skgud672-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223372187Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.223403848Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk8ttoij-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223342657Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk8ttoij-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223269606Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk8ttoij-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223229806Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.22322032Z caller=client.go:80 msg="creating client for grafana instance" user=525826 addr=dns:///sqills-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk69n2k8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223123575Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.223095181Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk69n2k8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223095024Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk69n2k8-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.223071804Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.222941043Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk4elwpp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222947913Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk4ahuft-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222894122Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk4ahuft-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222869982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:14.222878003Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk4ahuft-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222841902Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sk4ahuft-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222779541Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.222747515Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=721368 slug=samcopsey + level=debug ts=2024-05-29T13:44:14.222729529Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.222587368Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.222559132Z caller=remote_instance_store.go:51 user=487988 
slug=microstrategyits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sjljff0b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222522588Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.222519474Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=855233 slug=sadeno t=2024-05-29T13:44:14.222401741Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.370892ms + level=debug ts=2024-05-29T13:44:14.222434701Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.222440428Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=da03e70f64bb30a1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.222333044Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.222116501s EvaluationString:}]" duration=150.995777ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sjh60s3u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222357427Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.222336811Z caller=client.go:80 msg="creating client for grafana instance" user=646120 addr=dns:///sovoscompliance-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.222091728Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:14.222121309Z caller=client.go:80 msg="creating client for grafana instance" user=740293 addr=dns:///sopamo-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sj7uki60-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222114744Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sj7uki60-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222093044Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sj6pjtm1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.222054924Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sj6pjtm1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221969253Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sifcjcv3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221918832Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.22177066Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sifcjcv3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221866882Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.221649605Z caller=client.go:80 msg="creating client for grafana instance" user=672567 addr=dns:///soilcapital-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-siaryxme-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221754741Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev142.gradle.org, instance=dev142.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.221732991Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-siaryxme-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.22167867Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.221670105Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=522608 slug=shareup + level=debug ts=2024-05-29T13:44:14.221640605Z caller=ruler.go:522 msg="tenant is owned by this instance" user=522608 slug=shareup groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-siaryxme-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221639989Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.221470903Z caller=client.go:80 msg="creating client for grafana instance" user=733505 addr=dns:///smlogistik-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-si79b3oc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221460848Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-si79b3oc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221432097Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.221467624Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-si6e87w3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221361457Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-si6e87w3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.221290796Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=714711 slug=nomiai instance="service=selfie-generator" t=2024-05-29T13:44:14.221308476Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=714711 slug=nomiai t=2024-05-29T13:44:14.221264076Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=714711 slug=nomiai version=14 fingerprint=04b4abaad8b73fd8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.221157094Z level=debug msg="Alert rule evaluated" results="[{Instance:service=selfie-generator State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:service=selfie-generator Value:0xc004762920} C:{Var:C Labels:service=selfie-generator Value:0xc004762940} D:{Var:D Labels:service=selfie-generator Value:0xc004762900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.220507516s EvaluationString:[ var='A' labels={service=selfie-generator} value=19.14051945551127 ], [ var='C' labels={service=selfie-generator} value=19.14051945551127 ], [ var='D' labels={service=selfie-generator} value=0 ]}]" duration=474.258434ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shxy90t6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.221031743Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.220960791Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.220378583Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shvs2tvw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.220601669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shr6or03-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.220532428Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shr6or03-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.220475007Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shr6or03-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.220419967Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.22051973Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev124.gradle.org, instance=dev124.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.220519682Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.220485564Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.220508219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.220496627Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shmwi8oj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.220382987Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shmwi8oj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.220319076Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shmleye4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.220101884Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shhtm1np-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219921792Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=295631 slug=dapvizor t=2024-05-29T13:44:14.219890094Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shhtm1np-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219899062Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shhtm1np-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219872681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shhtm1np-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219862931Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=295631 slug=dapvizor version=64 fingerprint=ceec2cf53eb6877e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.219691193Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=Ta6tIPbnz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.219359596s EvaluationString:}]" duration=30.340789ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shd24dtq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21977876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shd24dtq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21971813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-shd24dtq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219664769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sh92w6u1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219604689Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sh92w6u1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219509128Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.219419795Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sh7txmrx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219363436Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.219354414Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=112.013739ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sh0r1bcx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219279475Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.219226057Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sh0r1bcx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219204084Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgvu305b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.219170794Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:14.219086181Z caller=client.go:80 msg="creating client for grafana instance" user=537530 addr=dns:///smarttechnics-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgmulh29-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218976642Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgmulh29-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218922142Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sglg4mxi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218860181Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.218809841Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.218823816Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sglg4mxi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218714829Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sglg4mxi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218645939Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgleohtg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218500417Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev119.gradle.org, instance=dev119.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.218457003Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgk1g04m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218418876Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.218385089Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.218432973Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.218378368Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgbdg4lg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218315495Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgbdg4lg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218292355Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sgbdg4lg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218263975Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sg165kzg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218165864Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sg165kzg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218143744Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.21804308Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.457675ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sg0km1y4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.218009382Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sg0km1y4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217957662Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sg0km1y4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217905121Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sfwneq2p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21782435Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sfwneq2p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2177623Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sfgv1nvu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217724759Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sfgv1nvu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217694949Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=316960 slug=mojamteam t=2024-05-29T13:44:14.217640828Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.793895ms
+ logger=ngalert.state.manager user=337951 slug=pawapay instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.217622475Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager user=337951 slug=pawapay instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.2176021Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sfgv1nvu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217630468Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sff0ng6o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217598888Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sff0ng6o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217576988Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sff0ng6o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217503807Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.21732928Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.21730316Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sfa6tmy8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217221594Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sfa6tmy8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.217199524Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.217162123Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.217021161Z caller=client.go:80 msg="creating client for grafana instance" user=647992 addr=dns:///slyngpreprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sf8csm8j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216996862Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.21693046Z caller=client.go:80 msg="creating client for grafana instance" user=748368 addr=dns:///skyscraper-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sf7oxil6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216968931Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.21696206Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:14.21690736Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=509700 slug=sevens
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sf7oxil6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21684514Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sf1wcrtc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2168016Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.216876446Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.21683262Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.216805708Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sezase1v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216629488Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sezase1v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216586117Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=436633 slug=swirldslabsproduction t=2024-05-29T13:44:14.216652279Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=436633 slug=swirldslabsproduction version=44 fingerprint=e3e46e05e782aae1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.216557314Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=query State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.21619516s EvaluationString:}]" duration=130.269113ms
+ level=debug ts=2024-05-29T13:44:14.216615549Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-seuvj8nk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216451966Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-seuvj8nk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216438016Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.216438018Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.216393804Z caller=grafana.go:247 user=35223 slug=silkroad msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=j-mDkNtnz" groups=3 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sen9wgit-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216407136Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.216199809Z caller=grafana.go:247 user=35223 slug=silkroad msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=j-mDkNtnz" groups=1 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sejw6exg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.216229724Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sebo7dz5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.2159132Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sebo7dz5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21588004Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=237185 slug=chcenergia instance= t=2024-05-29T13:44:14.215830528Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=237185 slug=chcenergia instance= t=2024-05-29T13:44:14.215818789Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se7viqzg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.215746209Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se7viqzg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.215715578Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se6ks48w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.215670788Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se6ks48w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.215639138Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:14.215637304Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager.persist user=178459 slug=usemodernlogic t=2024-05-29T13:44:14.215592537Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=51.5348ms
+ logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:14.2155772Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se6ks48w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.215556537Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=111653 slug=theassociationmxp version=1 fingerprint=3720af6d98e2ba25 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.215476484Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[E0:{Var:E Labels: Value:} E1:{Var:E Labels: Value:} E2:{Var:E Labels: Value:} E3:{Var:E Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.215129852s EvaluationString:[ var='E0' metric='NoData' labels={} value=null ], [ var='E1' metric='NoData' labels={} value=null ], [ var='E2' metric='NoData' labels={} value=null ], [ var='E3' metric='NoData' labels={} value=null ]}]" duration=291.688262ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se6ks48w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.215461426Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.215518132Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=eqny405-pomsp03" t=2024-05-29T13:44:14.215451175Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.215403127Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=eqny405-pomsp02" t=2024-05-29T13:44:14.21542233Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=eqny405-pomsp01" t=2024-05-29T13:44:14.215391204Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:14.215320356Z level=debug msg="State manager processing evaluation results" resultCount=3
+ logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:14.215345413Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=173730 slug=nikon version=42 fingerprint=0a3e2d9d488001a5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.215262644Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.215060343s EvaluationString:}]" duration=398.369925ms
+ level=debug ts=2024-05-29T13:44:14.215041166Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se21h4km-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.215139682Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se13uhjb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214996321Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=359640 slug=swfseu t=2024-05-29T13:44:14.214913532Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=25.685929ms
+ level=debug ts=2024-05-29T13:44:14.214965355Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-se13uhjb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21487504Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.21478099Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sdsu523e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214743838Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.214619212Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.214594201Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sdkjms6u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214566516Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sdkjms6u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214496966Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=C" t=2024-05-29T13:44:14.214468763Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=C" t=2024-05-29T13:44:14.214454462Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sdkjms6u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214460145Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sdkjms6u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214434055Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.214321836Z caller=ruler.go:522 msg="tenant is owned by this instance" user=509344 slug=services0xfn groups=0
+ level=debug ts=2024-05-29T13:44:14.214359112Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.214308036Z caller=client.go:80 msg="creating client for grafana instance" user=766318 addr=dns:///siscc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.214383139Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev105.gradle.org, instance=dev105.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.214301479Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sdjuk3ih-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214323764Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.214230664Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.214212824Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.214203781Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd9qddh4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214170262Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.214028839Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd9qddh4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.214071771Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.214012695Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd7dza33-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.213828469Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.213725272Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.213783773Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.213703498Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev104.gradle.org, instance=dev104.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.213706841Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd7dza33-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.213653047Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev104.gradle.org, instance=dev104.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.213525289Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.213258693Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd78rtgi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.213467785Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd78rtgi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.213433295Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.213285526Z caller=ruler.go:522 msg="tenant is owned by this instance" user=731058 slug=sdcitygis groups=0
+ level=debug ts=2024-05-29T13:44:14.213231372Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=320906 slug=techcyte t=2024-05-29T13:44:14.2132015Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=320906 slug=techcyte version=4 fingerprint=24362aad338a3066 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.213102702Z level=debug msg="Alert rule evaluated" results="[{Instance:ClusterName=production-vetscanimagyst-default, ServiceName=prod-vetcyte-grundium-server, company_type=vetcyte, component=grundium-server, owning_team=scanlot, terraform_project=grundium-integration/terraform/grundium-server, tier=prod State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:ClusterName=production-vetscanimagyst-default, ServiceName=prod-vetcyte-grundium-server, company_type=vetcyte, component=grundium-server, owning_team=scanlot, terraform_project=grundium-integration/terraform/grundium-server, tier=prod Value:0xc02eb7ce58} B:{Var:B Labels:ClusterName=production-vetscanimagyst-default, ServiceName=prod-vetcyte-grundium-server, company_type=vetcyte, component=grundium-server, owning_team=scanlot, terraform_project=grundium-integration/terraform/grundium-server, tier=prod Value:0xc02eb7ced0} C:{Var:C Labels:ClusterName=production-vetscanimagyst-default, ServiceName=prod-vetcyte-grundium-server, company_type=vetcyte, component=grundium-server, owning_team=scanlot, terraform_project=grundium-integration/terraform/grundium-server, tier=prod Value:0xc02eb7cf48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.212666514s EvaluationString:[ var='A' labels={ClusterName=production-vetscanimagyst-default, ServiceName=prod-vetcyte-grundium-server, company_type=vetcyte, component=grundium-server, owning_team=scanlot, terraform_project=grundium-integration/terraform/grundium-server, tier=prod} value=57.23876953125 ], [ var='B' labels={ClusterName=production-vetscanimagyst-default, ServiceName=prod-vetcyte-grundium-server, company_type=vetcyte, component=grundium-server, owning_team=scanlot, terraform_project=grundium-integration/terraform/grundium-server, tier=prod} value=57.23876953125 ], [ var='C' labels={ClusterName=production-vetscanimagyst-default, ServiceName=prod-vetcyte-grundium-server, company_type=vetcyte, component=grundium-server, owning_team=scanlot, terraform_project=grundium-integration/terraform/grundium-server, tier=prod} value=0 ]}]" duration=15.736721ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd4xwfug-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.213108251Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd4xwfug-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.213094541Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd4pxukz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21295492Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd3ewqss-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212822289Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd3ewqss-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212796598Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd2gxde5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212587316Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.212548919Z caller=ruler.go:522 msg="tenant is owned by this instance" user=548089 slug=sanchezdelreal groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sd2gxde5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212520085Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.212453584Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.212407084Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scxwa3w2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212374084Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scxwa3w2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212305933Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scxwa3w2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212234813Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.212195123Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scuagub6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212092301Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scuagub6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.212048801Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.212028121Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=855233 slug=sadeno t=2024-05-29T13:44:14.212025329Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scrp6m1f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21196193Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=855233 slug=sadeno instance="instance=localhost:9100" t=2024-05-29T13:44:14.211923527Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.211858911Z caller=remote_instance_store.go:51 user=512398 slug=brightdigital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=532654 slug=chathamdirectint instance= t=2024-05-29T13:44:14.211664408Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:14.211738308Z level=debug msg="Saving alert states" count=9 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=bdjimeqi13q4ga, ref_id=A" t=2024-05-29T13:44:14.211717446Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.211733207Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=bdjimeqi13q4ga, ref_id=A" t=2024-05-29T13:44:14.211666758Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=793400 slug=bedrock instance="datasource_uid=addnq3a8rpxc0b, ref_id=A" t=2024-05-29T13:44:14.211630637Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.211692983Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.211691635Z caller=remote_instance_store.go:51 user=793400 slug=bedrock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=793400 slug=bedrock instance="datasource_uid=addnq3a8rpxc0b, ref_id=A" t=2024-05-29T13:44:14.211588469Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.211629111Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=855233 slug=sadeno t=2024-05-29T13:44:14.211639011Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=bdjimeqi13q4ga, ref_id=A" t=2024-05-29T13:44:14.211606405Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.211577342Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.21158044Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scp7r61z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.211568486Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=793400 slug=bedrock t=2024-05-29T13:44:14.211562858Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=bdjimeqi13q4ga, ref_id=A" t=2024-05-29T13:44:14.211543768Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scp7r61z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.211511845Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=bdjimeqi13q4ga, ref_id=A" t=2024-05-29T13:44:14.211534744Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:14.211511887Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scp7r61z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.211442064Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=206107 slug=hydrolix version=2 fingerprint=d8770011efe66a7d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.211386166Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bdjimeqi13q4ga, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.211074966s EvaluationString:}]" duration=120.217114ms
+ logger=ngalert.scheduler user=855233 slug=sadeno version=19 fingerprint=3da7ced875d8ecc2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.211358406Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=localhost:9100 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=localhost:9100 Value:0xc05eddc448} C:{Var:C Labels:instance=localhost:9100 Value:0xc05eddc468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.211034995s EvaluationString:[ var='A' labels={instance=localhost:9100} value=12.626444184944859 ], [ var='C' labels={instance=localhost:9100} value=0 ]}]" duration=11.567686ms
+ level=debug ts=2024-05-29T13:44:14.211410463Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.21130623Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.211278802Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scnqkuev-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.211248722Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.211225688Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.211186826Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sclyihbq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.211169912Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=890268 slug=cmcngipd t=2024-05-29T13:44:14.211123378Z level=debug msg="Saving alert states done" count=7 max_state_save_concurrency=1 duration=82.006315ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sclyihbq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.211068951Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.211088705Z caller=ruler.go:522 msg="tenant is owned by this instance" user=447873 slug=pn0625test01 groups=8
+ logger=ngalert.state.manager.persist user=467258 slug=neonprod t=2024-05-29T13:44:14.210989222Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.506739ms
+ level=info component=discovery ts=2024-05-29T13:44:14.210854303Z caller=client.go:80 msg="creating client for grafana instance" user=682633 addr=dns:///shinu-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scjcy7je-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.210872739Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.210827043Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=466402 slug=apexfsnzprod t=2024-05-29T13:44:14.210782234Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.875905ms
+ level=debug ts=2024-05-29T13:44:14.210595612Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-scdjr5ej-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.210589946Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.21047045Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.210495537Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.210412751Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sc5d14l2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.210385684Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sc5d14l2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.210356983Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.209865494Z caller=client.go:80 msg="creating client for grafana instance" user=522608 addr=dns:///shareup-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:14.209609291Z caller=ruler.go:522 msg="tenant is owned by this instance" user=693775 slug=sabisstest groups=0
+ level=debug ts=2024-05-29T13:44:14.210226205Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sc5d14l2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.210214882Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.209148887Z caller=ruler.go:522 msg="tenant is owned by this instance" user=549935 slug=rockethems groups=1
+ logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/mapper/vg0-root, fstype=ext4, host=dev102.gradle.org, instance=dev102.gradle.org, job=node, mountpoint=/, os=Ubuntu" t=2024-05-29T13:44:14.21017317Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.208890384Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.208740883Z caller=client.go:80 msg="creating client for grafana instance" user=509344 addr=dns:///services0xfn-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:14.208693282Z caller=ruler.go:522 msg="tenant is owned by this instance" user=494083 slug=safety21 groups=0
+ level=info component=discovery ts=2024-05-29T13:44:14.207207168Z caller=client.go:80 msg="creating client for grafana instance" user=533957 addr=dns:///selfcare-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sc1epeb4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.21003943Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack,
persistentvolume=ws-sbx21scv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.209904039Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbx21scv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.209834818Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbrk6f1e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.209566615Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.209553063Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.209522585Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbrk6f1e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.209468944Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbrk6f1e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.209399503Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbq1t9lx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.209359163Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.208868657Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.209242345Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.209105503Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbpj0sqj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.20902393Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.209069601Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:14.209009765Z level=debug msg="Saving alert states done" count=33 max_state_save_concurrency=1 duration=679.573058ms + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/disk3s3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/, os=MacOS" t=2024-05-29T13:44:14.208968441Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:14.208930881Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.208852067Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=87780 slug=zencloudandhosting version=1 fingerprint=7669ab18806ddb77 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.208752815Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.208177536s EvaluationString:}]" duration=106.763356ms + logger=ngalert.state.manager.persist user=532655 slug=chathamdirectdev t=2024-05-29T13:44:14.208879879Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=532655 slug=chathamdirectdev instance= t=2024-05-29T13:44:14.208851269Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.208804666Z caller=remote_instance_store.go:51 user=401509 slug=redefined msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbonqflu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.208764647Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbl60ozu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.208410773Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/disk3s3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/, os=MacOS" 
t=2024-05-29T13:44:14.208414276Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID791dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=304032 slug=clearbanc t=2024-05-29T13:44:14.208324222Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.362365ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbgt9yda-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.208235252Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=401509 slug=redefined instance="name=missing_sectors_history_balances" t=2024-05-29T13:44:14.208183573Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbgt9yda-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.208158951Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=401509 slug=redefined instance="name=missing_sectors_history_balances" t=2024-05-29T13:44:14.208169165Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbfrflkx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.20805869Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbfrflkx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.208010739Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.207953524Z caller=remote_instance_store.go:51 user=235895 slug=nathanprenzler msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbfrflkx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207971759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=412141 slug=sharethrough t=2024-05-29T13:44:14.207948652Z 
level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.84396ms + logger=ngalert.state.manager user=401509 slug=redefined instance="name=final_unhealthy_listings" t=2024-05-29T13:44:14.207919791Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.207903455Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.935396ms + level=debug ts=2024-05-29T13:44:14.207930349Z caller=remote_image_capturer.go:33 user=401509 slug=redefined rule_org_id=1 rule_uid=cFL55dzVk msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=401509 slug=redefined instance="name=final_unhealthy_listings" t=2024-05-29T13:44:14.207909889Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbd79mmw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207832317Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=_h27TffVk, ref_id=A" t=2024-05-29T13:44:14.207828861Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:45:40Z next_ends_at=2024-05-29T13:46:10Z + logger=ngalert.state.manager user=401509 slug=redefined instance="name=duplicate_currency_pair_listings" t=2024-05-29T13:44:14.207759715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=538037 slug=drivewealth version=141 fingerprint=0281e5a93ebc79e3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.207709476Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=_h27TffVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.207416074s EvaluationString:}]" duration=32.470512ms + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS" t=2024-05-29T13:44:14.207745767Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.207646064Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbd79mmw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207638915Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.207546798Z caller=remote_alert_sender.go:94 user=118359 slug=atixlabs host=atixlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.125.61:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ea874dd1-8232-4589-9a3a-e130e4b401e0 alerts=1 + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbaheuue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207489024Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sbaheuue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207459544Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=401509 slug=redefined version=9 fingerprint=0fdeaf604fa403d4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.207184018Z level=debug msg="Alert rule evaluated" results="[{Instance:name=duplicate_crypto_currencies State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=duplicate_crypto_currencies Value:0xc0b7178ce8} B:{Var:B Labels:name=duplicate_crypto_currencies Value:0xc0b7178cf8} C:{Var:C Labels:name=duplicate_crypto_currencies Value:0xc0b7178d08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206432777s EvaluationString:[ var='A' labels={name=duplicate_crypto_currencies} value=1 ], [ var='B' labels={name=duplicate_crypto_currencies} value=1 ], [ var='C' labels={name=duplicate_crypto_currencies} value=1 ]} {Instance:name=duplicate_currency_pair_listings State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:name=duplicate_currency_pair_listings Value:0xc0b7178d20} B:{Var:B Labels:name=duplicate_currency_pair_listings Value:0xc0b7178d30} C:{Var:C Labels:name=duplicate_currency_pair_listings Value:0xc0b7178d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206450063s EvaluationString:[ var='A' labels={name=duplicate_currency_pair_listings} value=0 ], [ var='B' labels={name=duplicate_currency_pair_listings} value=0 ], [ var='C' labels={name=duplicate_currency_pair_listings} value=0 ]} {Instance:name=duplicate_currency_pairs State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:name=duplicate_currency_pairs Value:0xc0b7178f60} B:{Var:B Labels:name=duplicate_currency_pairs Value:0xc0b7178f70} C:{Var:C Labels:name=duplicate_currency_pairs Value:0xc0b7178f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206458239s EvaluationString:[ var='A' labels={name=duplicate_currency_pairs} value=0 ], [ var='B' labels={name=duplicate_currency_pairs} value=0 ], [ var='C' labels={name=duplicate_currency_pairs} value=0 ]} {Instance:name=final_unhealthy_listings State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=final_unhealthy_listings Value:0xc0b7178fd0} B:{Var:B Labels:name=final_unhealthy_listings Value:0xc0b7179020} C:{Var:C Labels:name=final_unhealthy_listings Value:0xc0b7179070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206465692s EvaluationString:[ var='A' labels={name=final_unhealthy_listings} value=1607 ], [ var='B' labels={name=final_unhealthy_listings} value=1607 ], [ var='C' labels={name=final_unhealthy_listings} value=1 ]} {Instance:name=late_listings 
State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=late_listings Value:0xc0b71796e0} B:{Var:B Labels:name=late_listings Value:0xc0b71792b0} C:{Var:C Labels:name=late_listings Value:0xc0b7179660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206473344s EvaluationString:[ var='A' labels={name=late_listings} value=44 ], [ var='B' labels={name=late_listings} value=44 ], [ var='C' labels={name=late_listings} value=1 ]} {Instance:name=missing_sectors_history_balances State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:name=missing_sectors_history_balances Value:0xc0b71799d0} B:{Var:B Labels:name=missing_sectors_history_balances Value:0xc0b7179950} C:{Var:C Labels:name=missing_sectors_history_balances Value:0xc0b7179990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206481845s EvaluationString:[ var='A' labels={name=missing_sectors_history_balances} value=0 ], [ var='B' labels={name=missing_sectors_history_balances} value=0 ], [ var='C' labels={name=missing_sectors_history_balances} value=0 ]} {Instance:name=missing_sectors_post_balances State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=missing_sectors_post_balances Value:0xc0b7179cd0} B:{Var:B Labels:name=missing_sectors_post_balances Value:0xc0b7179d10} C:{Var:C Labels:name=missing_sectors_post_balances Value:0xc0b7179d50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206488664s EvaluationString:[ var='A' labels={name=missing_sectors_post_balances} value=116707 ], [ var='B' labels={name=missing_sectors_post_balances} value=116707 ], [ var='C' labels={name=missing_sectors_post_balances} value=1 ]} {Instance:name=orphaned_crypto_currencies_without_pair State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=orphaned_crypto_currencies_without_pair Value:0xc0b7179dd0} B:{Var:B Labels:name=orphaned_crypto_currencies_without_pair Value:0xc0b7179e10} C:{Var:C Labels:name=orphaned_crypto_currencies_without_pair Value:0xc0b7179e50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206495991s EvaluationString:[ var='A' labels={name=orphaned_crypto_currencies_without_pair} value=1 ], [ var='B' labels={name=orphaned_crypto_currencies_without_pair} value=1 ], [ var='C' labels={name=orphaned_crypto_currencies_without_pair} value=1 ]} {Instance:name=orphaned_currency_pairs_or_listings State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=orphaned_currency_pairs_or_listings Value:0xc0b7179ed0} B:{Var:B Labels:name=orphaned_currency_pairs_or_listings Value:0xc0b7179ef0} C:{Var:C Labels:name=orphaned_currency_pairs_or_listings Value:0xc0b7179f10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206502442s EvaluationString:[ var='A' labels={name=orphaned_currency_pairs_or_listings} value=4 ], [ var='B' labels={name=orphaned_currency_pairs_or_listings} value=4 ], [ var='C' labels={name=orphaned_currency_pairs_or_listings} value=1 ]} {Instance:name=orphaned_token_currencies_without_pair State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=orphaned_token_currencies_without_pair Value:0xc0b7179f90} B:{Var:B Labels:name=orphaned_token_currencies_without_pair Value:0xc0b7179fd0} C:{Var:C Labels:name=orphaned_token_currencies_without_pair Value:0xc035742000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206508547s EvaluationString:[ var='A' labels={name=orphaned_token_currencies_without_pair} value=6 ], [ var='B' labels={name=orphaned_token_currencies_without_pair} value=6 ], [ var='C' 
labels={name=orphaned_token_currencies_without_pair} value=1 ]}]" duration=13.008439ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb34a3o6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207349122Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb34a3o6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207313262Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.207294766Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb34a3o6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207262232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb34a3o6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207235081Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.207183805Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb34a3o6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207183691Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.207168683Z caller=remote_instance_store.go:51 user=20177 slug=paddledash msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb34a3o6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.207162371Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=194293 slug=enterpret instance= t=2024-05-29T13:44:14.207140218Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y1nzx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.20707026Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=194293 slug=enterpret version=1 fingerprint=bf0a70720b722767 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.207025977Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206700552s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=41.266514ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y1nzx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206950378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y1nzx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206921218Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y0qtb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206881738Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.206930896Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y0qtb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206850367Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.206891665Z caller=ruler.go:522 msg="tenant is owned by this instance" user=741490 slug=rofertrading groups=0 + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y0qtb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206802787Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.206846884Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y0qtb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206731956Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.20659632Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=517596 slug=datar t=2024-05-29T13:44:14.206785745Z level=debug msg="Saving alert states" count=7 max_state_save_concurrency=1 + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.206769083Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.206762092Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.206786268Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.206739705Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.20672284Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.206711695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.206704325Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sb2y0qtb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206664995Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.206687087Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" 
t=2024-05-29T13:44:14.206674671Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:14.206531262Z caller=client.go:80 msg="creating client for grafana instance" user=695279 addr=dns:///secforhire-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.20662523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=Amount" t=2024-05-29T13:44:14.206569929Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-saumanmq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206550354Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.206499502Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.206494522Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-saumanmq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206535054Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=517596 slug=datar version=54 fingerprint=df2c7302dbfd0367 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.206495008Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=uleqBno4z, ref_id=Amount State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206211161s EvaluationString:}]" duration=706.623881ms + logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.206479083Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.859219ms + level=warn ts=2024-05-29T13:44:14.206466661Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=559318 slug=romenas + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:14.206513991Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.206460436Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.206506193Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=109452 slug=deltarisk version=25 fingerprint=7d94b497616b372d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.206413244Z level=debug 
msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.206141074s EvaluationString:}]" duration=48.082286ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sacywswx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206420583Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.206431622Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.206379061Z caller=ruler.go:522 msg="tenant is owned by this instance" user=571665 slug=reklpro groups=0 + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS" t=2024-05-29T13:44:14.206371317Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.206345754Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.206340639Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.748441ms + level=debug ts=2024-05-29T13:44:14.206297082Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=283914 slug=emmasleep version=9 fingerprint=2f46c2c611a90dfb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.206213413Z level=debug msg="Alert rule evaluated" results="[{Instance:QueueName=ecom-prod-wholesale-commercetools-product-published-events-queue State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:QueueName=ecom-prod-wholesale-commercetools-product-published-events-queue Value:0xc0b71784b0} F:{Var:F Labels:QueueName=ecom-prod-wholesale-commercetools-product-published-events-queue Value:0xc0b71784b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.205729525s EvaluationString:[ var='C' labels={QueueName=ecom-prod-wholesale-commercetools-product-published-events-queue} value=0 ], [ var='F' labels={QueueName=ecom-prod-wholesale-commercetools-product-published-events-queue} value=0 ]}]" duration=89.48541ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sac3tk87-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.20614451Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:14.205998018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=185895 slug=gradle instance="device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS" t=2024-05-29T13:44:14.206020928Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:14.205981917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-saa3pqui-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.206015099Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-saa3pqui-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.205899828Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sa0lqc4b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.205856827Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sa0lqc4b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.205824927Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=612525 slug=adleyeview version=154 fingerprint=be806ec1f7a9d33f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.205761704Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc04bdf4938} D:{Var:D Labels: Value:0xc04bdf4930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.197683315s EvaluationString:[ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=1.392341251s + level=debug ts=2024-05-29T13:44:14.205827737Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-sa0lqc4b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.205756456Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.205564353Z caller=client.go:80 msg="creating client for grafana instance" user=516717 
addr=dns:///sebastienbarbier-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s9vsw1cq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.205565274Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.205503852Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=558977 slug=ricardocosme1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s9r3enhl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.205404623Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s9r3enhl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.205368882Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.20520985Z caller=ruler.go:522 msg="tenant is owned by this instance" user=522417 slug=robertpaschedag groups=0 + level=debug ts=2024-05-29T13:44:14.205026667Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s9njl9yg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.204976248Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s9njl9yg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.204913107Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.204901077Z caller=remote_alert_sender.go:94 user=642786 slug=sophoscomnsg host=sophoscomnsg-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.44.231:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c54ef114-ecb6-46ee-a101-7f0a4cb0257b alerts=1 + logger=ngalert.state.manager.persist user=642786 slug=sophoscomnsg t=2024-05-29T13:44:14.204738565Z level=debug msg="Saving alert states done" count=1 
[Verbatim Grafana ngalert debug log fixture data: scheduler rule evaluations, state-manager transitions ("Setting next state" / "Keeping state"), remote alert-instance saves, hosted-grafana discovery client creation, and alert-template expansion errors (e.g. `function "role" not defined`).]
value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662648} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026626b0} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139562887s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.01449160758744128 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.01449160758744128 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662800} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662858} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026628c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139576899s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.007366403939455868 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.007366403939455868 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662a00} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662a78} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139605144s EvaluationString:[ var='A' labels={device=/dev/disk3s1, 
fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.01076239562511705 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.01076239562511705 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662b38} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662bb0} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662c18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139619955s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.005833022494550977 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.005833022494550977 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662d98} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662cc8} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662d30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139631656s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.007173703672142762 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.007173703672142762 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS 
Value:0xc002662ec0} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662f18} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662e58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139669928s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.006928980256157735 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.006928980256157735 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663108} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002662fd8} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139680741s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.01477488001850047 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.01477488001850047 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026632a8} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026631d0} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139692041s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.007926776940801572 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.007926776940801572 ], [ var='C' 
labels={device=/dev/disk3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026633f0} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663468} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139707089s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.0035288597307371328 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.0035288597307371328 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663620} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663688} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026635b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.13971942s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.0008075558764860657 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.0008075558764860657 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663828} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663758} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS 
Value:0xc0026637d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139733011s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.000855589821467051 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.000855589821467051 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026639c0} B:{Var:B Labels:device=/dev/disk3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc0026638f0} C:{Var:C Labels:device=/dev/disk3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS Value:0xc002663958}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139746161s EvaluationString:[ var='A' labels={device=/dev/disk3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.0008051416702917757 ], [ var='B' labels={device=/dev/disk3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0.0008051416702917757 ], [ var='C' labels={device=/dev/disk3s1, fstype=apfs, host=dev198.gradle.org, instance=dev198.gradle.org, job=node, mountpoint=/System/Volumes/Data, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc002663a98} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc002663b00} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc002663c88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139759001s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00046920985241516444 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00046920985241516444 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, 
os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc002663f90} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a078} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139787721s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00043889801823338015 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00043889801823338015 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a390} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a260} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a2d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139799527s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0004605084305376872 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0004605084305376872 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a4e0} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a568} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a5f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139815482s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, 
host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0007264613712607559 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0007264613712607559 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a6f0} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a788} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139824249s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0004699835852243206 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0004699835852243206 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099aa28} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099aaf0} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099a960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139831802s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0005246359725521454 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0005246359725521454 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, 
fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099ac00} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099ac48} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099acb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139840009s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00039226337421172097 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00039226337421172097 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099adb8} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099ae10} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099ad60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139847747s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0004328132730246903 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0004328132730246903 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099aed0} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099af28} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099af80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.13985527s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, 
os=MacOS} value=0.0004216747875629423 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0004216747875629423 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099b0e8} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099b038} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099b090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139862814s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0006442110665239031 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.0006442110665239031 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099b198} B:{Var:B Labels:device=/dev/disk3s3, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099b200} C:{Var:C Labels:device=/dev/disk3s3, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS Value:0xc02099b258}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139872511s EvaluationString:[ var='A' labels={device=/dev/disk3s3, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00043457052794060314 ], [ var='B' labels={device=/dev/disk3s3, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0.00043457052794060314 ], [ var='C' labels={device=/dev/disk3s3, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/System/Volumes/Update/mnt1, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b2f8} B:{Var:B 
Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b370} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b3d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139879989s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00046817503137419614 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00046817503137419614 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev177.gradle.org, instance=dev177.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev178.gradle.org, instance=dev178.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev178.gradle.org, instance=dev178.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b488} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev178.gradle.org, instance=dev178.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b4e0} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev178.gradle.org, instance=dev178.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b5a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139890709s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev178.gradle.org, instance=dev178.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00044109849668105827 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev178.gradle.org, instance=dev178.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00044109849668105827 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev178.gradle.org, instance=dev178.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b650} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b6b8} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139899818s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00043890007413283083 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00043890007413283083 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev179.gradle.org, instance=dev179.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b8a8} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b7c0} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139908664s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0004605021409501697 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0004605021409501697 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev180.gradle.org, instance=dev180.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099ba18} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b968} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099b9c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139916819s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0007264230133096072 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0007264230133096072 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev182.gradle.org, instance=dev182.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bb70} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bac0} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bb08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139924945s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0004700059694255909 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0004700059694255909 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev183.gradle.org, instance=dev183.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, 
fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bc90} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bcd8} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bc28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139932956s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0005246388173755223 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0005246388173755223 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev184.gradle.org, instance=dev184.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099be68} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bda8} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099be00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.13994078s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0003922577389268156 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0003922577389268156 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev185.gradle.org, instance=dev185.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6048} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bf78} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc02099bfd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139948376s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0004327887148346532 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0004327887148346532 ], [ var='C' labels={device=/dev/disk3s3s1, 
fstype=apfs, host=dev186.gradle.org, instance=dev186.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6128} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6280} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca62f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139958522s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00042167263018710877 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00042167263018710877 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev187.gradle.org, instance=dev187.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca63b8} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6440} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139965931s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0006442637554473496 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0006442637554473496 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev188.gradle.org, instance=dev188.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6588} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca66c0} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139972769s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00043456626350724115 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, 
host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.00043456626350724115 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev189.gradle.org, instance=dev189.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6a00} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6810} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6988}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139979772s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0002978236896518105 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/, os=MacOS} value=0.0002978236896518105 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev190.gradle.org, instance=dev190.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6bd0} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6b00} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6b58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139989479s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS} value=9.168220881417e-05 ], [ var='B' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS} value=9.168220881417e-05 ], [ var='C' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev196.gradle.org, instance=dev196.gradle.org, job=node, mountpoint=/, os=MacOS} value=0 ]} {Instance:device=/dev/disk3s3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/, os=MacOS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6d20} B:{Var:B Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6d88} C:{Var:C Labels:device=/dev/disk3s3s1, fstype=apfs, host=dev197.gradle.org, instance=dev197.gradle.org, job=node, mountpoint=/, os=MacOS Value:0xc01cca6c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.139997672s EvaluationString:[ var='A' labels={device=/dev/disk3s3s1, fstype=apfs, host=dev197.gradle.org, 
| Host | OS | `A` (= `B`) | `C` | EvaluationDuration |
|------|----|-------------|-----|--------------------|
| dev190.gradle.org | MacOS | 0.0002978236896518105 | 0 | 4.139979772s |
| dev196.gradle.org | MacOS | 9.168220881417e-05 | 0 | 4.139989479s |
| dev197.gradle.org | MacOS | 9.168228842337811e-05 | 0 | 4.139997672s |
| dev198.gradle.org | MacOS | 9.168228842337811e-05 | 0 | 4.140004675s |
| dev102.gradle.org | Ubuntu | 0.004553461424587457 | 0 | 4.140011435s |
| dev103.gradle.org | Ubuntu | 0.004553274886823777 | 0 | 4.140020709s |
| dev104.gradle.org | Ubuntu | 0.0045541702680891305 | 0 | 4.140032281s |
| dev105.gradle.org | Ubuntu | 0.004553349501929205 | 0 | 4.140043389s |
| dev116.gradle.org | Ubuntu | 0.004552566043322104 | 0 | 4.140054796s |
| dev117.gradle.org | Ubuntu | 0.004559430633022665 | 0 | 4.140062369s |
| dev118.gradle.org | Ubuntu | 0.004552491428216676 | 0 | 4.140070115s |
| dev119.gradle.org | Ubuntu | 0.004559841016102517 | 0 | 4.140077416s |
| dev120.gradle.org | Ubuntu | 0.004553610654798312 | 0 | 4.140084245s |
| dev122.gradle.org | Ubuntu | 0.004553312194376491 | 0 | 4.140091084s |
| dev123.gradle.org | Ubuntu | 0.004558759097073706 | 0 | 4.140099729s |
| dev124.gradle.org | Ubuntu | 0.00455252873576939 | 0 | 4.140108109s |
| dev141.gradle.org | Ubuntu | 0.004552416813111249 | 0 | 4.14011733s |
| dev142.gradle.org | Ubuntu | 0.004552640658427531 | 0 | 4.140132553s |
| dev143.gradle.org | Ubuntu | 0.004543127232483957 | 0 | 4.140143524s |
| dev144.gradle.org | Ubuntu | 0.004553498732140171 | 0 | 4.140153395s |
| dev145.gradle.org | Ubuntu | 0.004552566043322104 | 0 | 4.140160909s |
| dev147.gradle.org | Ubuntu | 0.004548163752101209 | 0 | 4.140168759s |
| dev148.gradle.org | Ubuntu | 0.004551819892267717 | 0 | 4.140175835s |
| dev165.gradle.org | Ubuntu | 0.3250999543952475 | 0 | 4.140183199s |
| dev167.gradle.org | Ubuntu | 0.29147398559271853 | 0 | 4.140189442s |
| dev168.gradle.org | Ubuntu | 0.09329801899879664 | 0 | 4.140196286s |
| dev169.gradle.org | Ubuntu | 0.2693564641155066 | 0 | 4.140206165s |
| dev170.gradle.org | Ubuntu | 0.15084704559997708 | 0 | 4.140217989s |
| dev171.gradle.org | Ubuntu | 0.24804474846949498 | 0 | 4.140228808s |
| dev172.gradle.org | Ubuntu | 0.18135268530810666 | 0 | 4.140240795s |
| dev173.gradle.org | Ubuntu | 0.26317620685456145 | 0 | 4.140248792s |
| dev174.gradle.org | Ubuntu | 0.27899136345077546 | 0 | 4.140257683s |
| dev175.gradle.org | Ubuntu | 0.2568669400763104 | 0 | 4.140265309s |
| dev176.gradle.org | Ubuntu | 0.2738116574462294 | 0 | 4.140276792s |
| dev200.gradle.org | Ubuntu | 0.3824304706601467 | 0 | 4.140287364s |
| dev201.gradle.org | Ubuntu | 0.20282829303321748 | 0 | 4.140300756s |
| dev202.gradle.org | Ubuntu | 0.315420099458951 | 0 | 4.140312946s |
| dev203.gradle.org | Ubuntu | 0.06845648655973025 | 0 | 4.140323398s |
| dev204.gradle.org | Ubuntu | 0.37243462075231126 | 0 | 4.140334655s |
| dev205.gradle.org | Ubuntu | 0.25837722442552336 | 0 | 4.140346281s |
| dev206.gradle.org | Ubuntu | 0.21935662080722795 | 0 | 4.140363161s |
| dev207.gradle.org | Ubuntu | 0.2439334934672983 | 0 | 4.140375053s |
| dev208.gradle.org | Ubuntu | 0.41247756323779416 | 0 | 4.140387386s |
| dev209.gradle.org | Ubuntu | 0.2059528005735215 | 0 | 4.140399643s |
| dev21.gradle.org | Ubuntu | 0.004555923723066901 | 0 | 4.140412633s |
| dev210.gradle.org | Ubuntu | 0.2084293505381838 | 0 | 4.140424932s |
| dev211.gradle.org | Ubuntu | 0.26714740930384706 | 0 | 4.140436241s |
| dev212.gradle.org | Ubuntu | 0.24595892050446977 | 0 | 4.140444398s |
| dev213.gradle.org | Ubuntu | 0.2652158853768719 | 0 | 4.140451336s |
| dev214.gradle.org | Ubuntu | 0.20602372223124232 | 0 | 4.140457994s |
| dev215.gradle.org | Ubuntu | 0.3961148063827552 | 0 | 4.14046493s |
| dev216.gradle.org | Ubuntu | 0.2679011710990029 | 0 | 4.140476151s |
| dev217.gradle.org | Ubuntu | 0.25564097658637686 | 0 | 4.140487208s |
| dev218.gradle.org | Ubuntu | 0.3967512732321592 | 0 | 4.140498524s |
| dev219.gradle.org | Ubuntu | 0.3051335864078736 | 0 | 4.140510115s |
| dev220.gradle.org | Ubuntu | 0.3420518348152888 | 0 | 4.140521951s |
| dev221.gradle.org | Ubuntu | 0.23873319369365065 | 0 | 4.140544157s |
| dev222.gradle.org | Ubuntu | 0.21828675211787518 | 0 | 4.140552399s |

The dump then continues with the record for `dev223.gradle.org` in the same raw format:

{Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev223.gradle.org, instance=dev223.gradle.org,
job=node, mountpoint=/, os=Ubuntu Value:0xc018307660} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0183074b0} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140559645s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.1915042942485483 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.1915042942485483 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev223.gradle.org, instance=dev223.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307960} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0183077a0} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140566578s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.2745013994809177 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.2745013994809177 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev224.gradle.org, instance=dev224.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307a78} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307b40} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307ba8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140573731s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.2542357129980708 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.2542357129980708 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev225.gradle.org, instance=dev225.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} 
{Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307d18} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307d70} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307ea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140580627s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.19154242256742815 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.19154242256742815 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev226.gradle.org, instance=dev226.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc018307fc8} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558070} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140606745s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553573347245599 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553573347245599 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev227.gradle.org, instance=dev227.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0045582a8} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558360} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140619618s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045501037448425485 ], [ var='B' labels={device=/dev/mapper/vg0-root, 
fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045501037448425485 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev228.gradle.org, instance=dev228.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558460} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0045584c8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140630243s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552454120663962 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552454120663962 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev229.gradle.org, instance=dev229.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0045586d8} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558720} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140646533s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045466341424396095 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045466341424396095 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev230.gradle.org, instance=dev230.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev231.gradle.org, instance=dev231.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev231.gradle.org, instance=dev231.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558940} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev231.gradle.org, instance=dev231.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558830} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev231.gradle.org, instance=dev231.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558888}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:4.140658526s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev231.gradle.org, instance=dev231.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552864503743925 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev231.gradle.org, instance=dev231.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552864503743925 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev231.gradle.org, instance=dev231.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev232.gradle.org, instance=dev232.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev232.gradle.org, instance=dev232.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558b18} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev232.gradle.org, instance=dev232.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558b80} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev232.gradle.org, instance=dev232.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558be8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140670557s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev232.gradle.org, instance=dev232.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553424117034743 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev232.gradle.org, instance=dev232.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553424117034743 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev232.gradle.org, instance=dev232.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev233.gradle.org, instance=dev233.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev233.gradle.org, instance=dev233.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558e58} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev233.gradle.org, instance=dev233.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558ce8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev233.gradle.org, instance=dev233.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140682475s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev233.gradle.org, instance=dev233.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552416813111249 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev233.gradle.org, instance=dev233.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552416813111249 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev233.gradle.org, instance=dev233.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004558fb8} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, 
host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559110} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140693341s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552566043322104 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552566043322104 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev234.gradle.org, instance=dev234.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559478} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0045592a8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140705485s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004550252975053515 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004550252975053515 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev235.gradle.org, instance=dev235.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559620} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559678} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140716477s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004559318710364413 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004559318710364413 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev236.gradle.org, instance=dev236.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, 
mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559888} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0045597c8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140728533s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553312194376491 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553312194376491 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev237.gradle.org, instance=dev237.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev238.gradle.org, instance=dev238.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev238.gradle.org, instance=dev238.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc0045599d0} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev238.gradle.org, instance=dev238.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559a58} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev238.gradle.org, instance=dev238.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559ab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140740712s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev238.gradle.org, instance=dev238.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045530137339547805 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev238.gradle.org, instance=dev238.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045530137339547805 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev238.gradle.org, instance=dev238.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev239.gradle.org, instance=dev239.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev239.gradle.org, instance=dev239.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559d20} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev239.gradle.org, instance=dev239.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559bd8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev239.gradle.org, instance=dev239.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559cd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140752503s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev239.gradle.org, instance=dev239.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552603350874818 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev239.gradle.org, instance=dev239.gradle.org, job=node, mountpoint=/, os=Ubuntu} 
value=0.004552603350874818 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev239.gradle.org, instance=dev239.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559e78} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559f50} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc004559e00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140763473s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552864503743925 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552864503743925 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev240.gradle.org, instance=dev240.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a0a8} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a0f0} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140775902s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552454120663962 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552454120663962 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev241.gradle.org, instance=dev241.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a258} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a1a8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140787136s EvaluationString:[ var='A' 
labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004547566831257677 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004547566831257677 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev242.gradle.org, instance=dev242.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a360} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a3a8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a2f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140799712s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553051041507494 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553051041507494 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev243.gradle.org, instance=dev243.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a458} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a500} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140857911s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004551969122478572 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004551969122478572 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev244.gradle.org, instance=dev244.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev245.gradle.org, instance=dev245.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev245.gradle.org, instance=dev245.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a610} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev245.gradle.org, instance=dev245.gradle.org, job=node, mountpoint=/, os=Ubuntu 
Value:0xc02909a668} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev245.gradle.org, instance=dev245.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a6c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140870429s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev245.gradle.org, instance=dev245.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552304890453107 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev245.gradle.org, instance=dev245.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552304890453107 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev245.gradle.org, instance=dev245.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev246.gradle.org, instance=dev246.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev246.gradle.org, instance=dev246.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a760} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev246.gradle.org, instance=dev246.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a7b8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev246.gradle.org, instance=dev246.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140881989s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev246.gradle.org, instance=dev246.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004549767976868124 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev246.gradle.org, instance=dev246.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004549767976868124 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev246.gradle.org, instance=dev246.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev247.gradle.org, instance=dev247.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev247.gradle.org, instance=dev247.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a968} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev247.gradle.org, instance=dev247.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a8d8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev247.gradle.org, instance=dev247.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909a930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140893062s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev247.gradle.org, instance=dev247.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553051041507494 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev247.gradle.org, instance=dev247.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004553051041507494 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev247.gradle.org, instance=dev247.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909ab30} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909aa70} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909aac8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140906658s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.00455252873576939 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.00455252873576939 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev248.gradle.org, instance=dev248.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev249.gradle.org, instance=dev249.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev249.gradle.org, instance=dev249.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909ac80} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev249.gradle.org, instance=dev249.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909abd0} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev249.gradle.org, instance=dev249.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909ac18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.14091925s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev249.gradle.org, instance=dev249.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552939118849353 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev249.gradle.org, instance=dev249.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552939118849353 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev249.gradle.org, instance=dev249.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909ad20} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909ad78} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909add0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140931709s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045539837303254505 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045539837303254505 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, 
host=dev250.gradle.org, instance=dev250.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909af20} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909ae70} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909aec8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140943042s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045586098668627395 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045586098668627395 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev251.gradle.org, instance=dev251.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b018} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b070} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909afd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140957314s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552491428216676 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552491428216676 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev252.gradle.org, instance=dev252.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b168} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b1c0} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140968632s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, 
mountpoint=/, os=Ubuntu} value=0.004552864503743925 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004552864503743925 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev253.gradle.org, instance=dev253.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b270} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b2b8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140979237s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004543127232483957 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004543127232483957 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev254.gradle.org, instance=dev254.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev255.gradle.org, instance=dev255.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev255.gradle.org, instance=dev255.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b3b0} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev255.gradle.org, instance=dev255.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b408} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev255.gradle.org, instance=dev255.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140989739s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev255.gradle.org, instance=dev255.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004556184875936009 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev255.gradle.org, instance=dev255.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004556184875936009 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev255.gradle.org, instance=dev255.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev256.gradle.org, instance=dev256.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev256.gradle.org, instance=dev256.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b560} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev256.gradle.org, instance=dev256.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b5a8} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev256.gradle.org, 
instance=dev256.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.140999951s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev256.gradle.org, instance=dev256.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.00455252873576939 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev256.gradle.org, instance=dev256.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.00455252873576939 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev256.gradle.org, instance=dev256.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b710} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b658} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.141013864s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004545701453621653 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004545701453621653 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev257.gradle.org, instance=dev257.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b7c0} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b808} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc02909b860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.141024831s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045532002717183495 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045532002717183495 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev258.gradle.org, instance=dev258.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev259.gradle.org, instance=dev259.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev259.gradle.org, instance=dev259.gradle.org, job=node, 
[Truncated log dump: repeated Grafana alert-rule evaluation state entries, one per host (dev32–dev310.gradle.org and others), all with labels device=/dev/mapper/vg0-root, fstype=ext4, job=node, mountpoint=/, os=Ubuntu. Every entry reports State:Normal, EvaluatedAt 2024-05-29 13:44:10 UTC, EvaluationDuration ≈4.14 s, and per-host values where var A equals var B (fraction of disk used, ranging from ≈0.0045 to ≈0.40) and var C is 0.]
instance=dev79.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.2785683704180356 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev79.gradle.org, instance=dev79.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58698} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58750} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d587d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.141732974s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.27716471105449647 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.27716471105449647 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev80.gradle.org, instance=dev80.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d588d8} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58940} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d589f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.141739778s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.1839749585736935 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.1839749585736935 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev81.gradle.org, instance=dev81.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58bc0} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58c78} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58b28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.141746785s EvaluationString:[ var='A' 
labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004558535251757312 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.004558535251757312 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev82.gradle.org, instance=dev82.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/, os=Ubuntu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58d98} B:{Var:B Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58e50} C:{Var:C Labels:device=/dev/mapper/vg0-root, fstype=ext4, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/, os=Ubuntu Value:0xc027d58ed8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.14175345s EvaluationString:[ var='A' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045497306693154105 ], [ var='B' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0.0045497306693154105 ], [ var='C' labels={device=/dev/mapper/vg0-root, fstype=ext4, host=dev83.gradle.org, instance=dev83.gradle.org, job=node, mountpoint=/, os=Ubuntu} value=0 ]} {Instance:device=/dev/mapper/vg0-root, fstype=ext4, host=dev85.gradle.org, instance=dev85.gradle.org, + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s52m7ady-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.194226977Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.194233898Z caller=remote_alert_sender.go:94 user=87780 slug=zencloudandhosting host=zencloudandhosting-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.192.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=be3d6d46-4850-4479-9cf3-cac2ce89f82c alerts=1 + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.194023526Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s5005fs4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.194064746Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s5005fs4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.194021445Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s5005fs4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.193915284Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.19391144Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:14.193851342Z caller=client.go:80 msg="creating client for grafana instance" user=687697 addr=dns:///restico-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:14.193797942Z caller=client.go:80 msg="creating client for grafana instance" user=696213 addr=dns:///resso-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s5005fs4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.193801903Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.193837959Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=845543 slug=deliveryhero t=2024-05-29T13:44:14.193734091Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=68.005157ms + level=debug ts=2024-05-29T13:44:14.19372422Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.193739223Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4sdqglt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.193563781Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4sdqglt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.19349311Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.193370529Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.193270848Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4rkbcz0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.193234617Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=447897 slug=mysten t=2024-05-29T13:44:14.193229494Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.193073568Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4p0nduo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.193018645Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.193019334Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=554721 slug=pordtis + level=debug ts=2024-05-29T13:44:14.192988034Z caller=ruler.go:522 msg="tenant is owned by this instance" user=554721 slug=pordtis groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4p0nduo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192943134Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.192872287Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4oh4j6k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192826523Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.192821097Z caller=grafana.go:247 user=811513 slug=inpowertech msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=5 alerts=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4oh4j6k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192749182Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4m57dr4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.19247771Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:14.192577709Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=34.229527ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4m57dr4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192400589Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.192578121Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4lzbos3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192317668Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.192268831Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.19221488Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4lzbos3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192140416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4lzbos3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192106926Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4hhbwr3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.192063305Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.191792123Z caller=ruler.go:522 msg="tenant is owned by this instance" user=707175 slug=nexi groups=12 + level=debug ts=2024-05-29T13:44:14.191826773Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4ey4s1u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.191786322Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.191726452Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.50963ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4ey4s1u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.191669801Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.191628703Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.191537509Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.191457369Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:14.191337218Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=547038 slug=marcushoy + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4bkjc6i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.191420709Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4bkjc6i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.191351358Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4b4l68x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.191311248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4b4l68x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.191215877Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.191229547Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:14.191165117Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=717149 slug=pingodeliver + level=debug ts=2024-05-29T13:44:14.191119816Z caller=ruler.go:522 msg="tenant is owned by this instance" user=757699 slug=phlitenergy groups=4 + level=info component=discovery ts=2024-05-29T13:44:14.191085016Z caller=client.go:80 msg="creating client for grafana instance" user=610036 addr=dns:///raulvillamor-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.190589111Z caller=ruler.go:522 msg="tenant is owned by this instance" user=529458 slug=onemeeting groups=0 + level=debug ts=2024-05-29T13:44:14.191070939Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:14.191010215Z caller=client.go:80 msg="creating client for grafana instance" user=625317 addr=dns:///rakowiecki-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:14.190981215Z caller=client.go:80 msg="creating client for grafana instance" user=547507 addr=dns:///raindemo-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.190940215Z caller=ruler.go:522 msg="tenant is owned by this instance" user=623958 slug=penbevdev groups=2 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s45e3mkq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190992504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s45e3mkq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190916394Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.190794313Z caller=client.go:80 msg="creating client for grafana instance" user=496686 addr=dns:///radialmire0y-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.190735913Z caller=ruler.go:522 msg="tenant is owned by this instance" user=625805 slug=polinaboneva groups=0 + level=info component=discovery ts=2024-05-29T13:44:14.190720313Z caller=client.go:80 msg="creating client for grafana instance" user=717763 addr=dns:///qxt-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s45e3mkq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190823873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s456vklc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190704781Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.190677012Z caller=client.go:80 msg="creating client for grafana instance" user=690653 addr=dns:///quara-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:14.190668712Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=570228 slug=pgym + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.190715022Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.190669933Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s456vklc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190660721Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.190574411Z caller=ruler.go:522 msg="tenant is owned by this instance" user=616334 slug=pastopnik groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s456vklc-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.19054893Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.190432783Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s4378pge-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190309867Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.190304409Z caller=client.go:80 msg="creating client for grafana instance" user=726234 addr=dns:///qfree-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.190286709Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.190161503Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s407spe1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190152996Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s407spe1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.190109715Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.123458177Z caller=client.go:80 msg="creating client for grafana instance" user=518522 addr=dns:///projectcttm-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s3ogo0zv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.189974444Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.189826904Z caller=client.go:80 msg="creating client for grafana instance" user=698551 addr=dns:///pvlvlml-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.189867858Z 
caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.189806597Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=642786 slug=sophoscomnsg version=26 fingerprint=b4c4e323a1f45dc7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.189726635Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.189316935s EvaluationString:}]" duration=9.52867ms + level=debug ts=2024-05-29T13:44:14.189668955Z caller=remote_instance_store.go:51 user=473984 slug=driivz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.18967937Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s3m8nd5j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.189511159Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.189261224Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=359640 slug=swfseu t=2024-05-29T13:44:14.189219105Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=359640 slug=swfseu instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.189178871Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=359640 slug=swfseu instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.189159425Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s3gwkonn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.189162896Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=359640 slug=swfseu t=2024-05-29T13:44:14.189028677Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:14.189085825Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.959491ms + logger=ngalert.scheduler user=359640 slug=swfseu version=8 fingerprint=3e35449a3e591ccf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.188880576Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.188415316s EvaluationString:}]" duration=10.893065ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s3fmpgsb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.189080655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s3fmpgsb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.188950943Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.188881518Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=304032 slug=clearbanc instance="datasource_uid=grafanacloud-logs, ref_id=Queue Failed to Consume Error message" t=2024-05-29T13:44:14.188940085Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=304032 slug=clearbanc instance="datasource_uid=grafanacloud-logs, ref_id=Queue Failed to Consume Error message" t=2024-05-29T13:44:14.188934082Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=233863 slug=rtsystems t=2024-05-29T13:44:14.188930534Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s3fmpgsb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.188870183Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.188697989Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.188684009Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s3e9cmr6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18865309Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2nowjrb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.188500539Z level=debug msg="Keeping state" state=Normal + level=debug 
ts=2024-05-29T13:44:14.188341463Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.188242101Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=324083 slug=sgnl t=2024-05-29T13:44:14.188208577Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=324083 slug=sgnl instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.188188399Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.187901615Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2g8rzj9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.187917313Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2do0gcw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.187792301Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.187831138Z caller=remote_instance_store.go:51 user=746168 slug=dbeck121 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2a1ebh3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.187083784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2a1ebh3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186964793Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s27iq88d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186544739Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s27iq88d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186439538Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2780l2k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186372827Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2780l2k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186330657Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s2780l2k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186289766Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=171235 slug=circleslabs t=2024-05-29T13:44:14.187746104Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=32.839031ms + logger=ngalert.state.manager.persist user=171550 slug=selfchill t=2024-05-29T13:44:14.187661813Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.53549ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s22k7yzd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186241676Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s22k7yzd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186212245Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s226dxyv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186041804Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.187667075Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1syccnt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.186006383Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1syccnt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185995483Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1syccnt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185963143Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1syccnt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185953253Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.187564294Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1p5il9z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185827141Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1koyucg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185746801Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1koyucg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18571293Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.18752821Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1475ju8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185438437Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s1475ju8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185297806Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s13ptl94-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185237915Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s13ptl94-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185199385Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s13ptl94-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185187135Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s13ptl94-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.185154134Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0wo1477-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184986423Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0v1kvgi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184944192Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0v1kvgi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184859011Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.187264845Z caller=remote_instance_store.go:51 user=884866 slug=cnonumerique msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0scjy2h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184825671Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0scjy2h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184813291Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0rmicoo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184562118Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0rmicoo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184550288Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.187188468Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.18713041Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0rmicoo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184505938Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.187146771Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.473096ms
+level=debug ts=2024-05-29T13:44:14.187119492Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0j640p3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184418237Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0j640p3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184384267Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0d15e4d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184340486Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0d15e4d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184297196Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.187095658Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0d15e4d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184285196Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s0d15e4d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.184254875Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.187043391Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-s041mb0g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183988222Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzy79efm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18379132Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.186854722Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzy79efm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18378156Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzxeqrrc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18370755Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzxeqrrc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183642129Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzxeqrrc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183609479Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rztuqutg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183559608Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rztuqutg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183545078Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=LAGOS Query" t=2024-05-29T13:44:14.186639744Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.186571816Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.186425051Z caller=remote_instance_store.go:51 user=543604 slug=kingmakers msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:14.186414567Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.18633473Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.186385036Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=698103 slug=vericast t=2024-05-29T13:44:14.186355905Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.185969767Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.185781956Z caller=remote_instance_store.go:51 user=890268 slug=cmcngipd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:14.185688135Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.185650204Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:14.185610669Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.185617873Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.185157722Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.185089175Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=tpu-device-plugin, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.185007056Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=tpu-device-plugin, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.184950876Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.label.storage_type=ssd, metric.name=value.storage_utilization, resource.label.cluster=itemstore-prod, resource.label.instance=itemstore-prod, resource.label.project_id=planet-itemstore-prod, resource.label.zone=us-central1-f" t=2024-05-29T13:44:14.184866854Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=472647 slug=planet instance="metric.label.storage_type=ssd, metric.name=value.storage_utilization, resource.label.cluster=itemstore-prod, resource.label.instance=itemstore-prod, resource.label.project_id=planet-itemstore-prod, resource.label.zone=us-central1-f" t=2024-05-29T13:44:14.184821232Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.184827376Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.184694811Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=tpu-device-plugin, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.184684925Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=tpu-device-plugin, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.184565252Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=tpu-device-plugin, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.184520183Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.184421702Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="connector=pasta-retail-terminal" t=2024-05-29T13:44:14.18425896Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=tpu-device-plugin, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.184274174Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.184199362Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="connector=pasta-punter-risk-updated" t=2024-05-29T13:44:14.184174897Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=secret-csi-driver-control-cluster-secrets-store-csi-driver" t=2024-05-29T13:44:14.184163371Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.1841116Z caller=remote_instance_store.go:51 user=849729 slug=medopsimscare msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="connector=pasta-bonustool-assignedbonusprogram" t=2024-05-29T13:44:14.184050716Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.184106112Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=849729 slug=medopsimscare t=2024-05-29T13:44:14.184071739Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=849729 slug=medopsimscare instance="datasource_uid=bdby5xk5gfcaof, ref_id=A" t=2024-05-29T13:44:14.184061089Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="connector=pasta-betstorage-transaction" t=2024-05-29T13:44:14.184008194Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.184042123Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="connector=pasta-betoffer" t=2024-05-29T13:44:14.183910757Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=849729 slug=medopsimscare version=4 fingerprint=74f04d3f46d62eed attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.183977238Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bdby5xk5gfcaof, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.183592077s EvaluationString:}]" duration=197.189289ms
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="connector=pasta-betoffer" t=2024-05-29T13:44:14.183897042Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.183824909Z level=debug msg="State manager processing evaluation results" resultCount=10
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.183912384Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.183891929Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.183834322Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.183753951Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.183633158Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.183619429Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.183550641Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=854581 slug=freewave t=2024-05-29T13:44:14.183322786Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rztuqutg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183426287Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.183446441Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:14.18345567Z caller=remote_alert_sender.go:94 user=467258 slug=neonprod host=neonprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.36.75:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c1031582-c519-4912-95be-54692e4dd0cf alerts=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rztuqutg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183391406Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=854581 slug=freewave version=2 fingerprint=6f1172bd47bfe8d4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.183093773Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=load-balancer2-staging State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=load-balancer2-staging Value:0xc00b57dad0} B:{Var:B Labels:instance=load-balancer2-staging Value:0xc00b57dae0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.182220439s EvaluationString:[ var='A' labels={instance=load-balancer2-staging} value=1.5666666669615807 ], [ var='B' labels={instance=load-balancer2-staging} value=0 ]}]" duration=13.688091ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzkuv5s6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183109653Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.183176384Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:14.183428988Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:14.183419779Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzgukf5d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.183021693Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.183388546Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzgukf5d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182982912Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.183142773Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=runsc-metric-server, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.183140226Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-node-tuner, redpanda_id=cpbi30274ocl436p6ma0" t=2024-05-29T13:44:14.183062691Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzdxmolz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182836631Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-node-tuner, redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:14.182844366Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-node-tuner, redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:14.182809591Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzdxmolz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18272737Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=495005 slug=idealscorp t=2024-05-29T13:44:14.182662438Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=495005 slug=idealscorp instance="instance=prod-erp-db-server, job=windows, volume=D:" t=2024-05-29T13:44:14.18264011Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=495005 slug=idealscorp instance="instance=prod-erp-db-server, job=windows, volume=D:" t=2024-05-29T13:44:14.182624656Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.182594252Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-node-tuner, redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:14.182688426Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-node-tuner, redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:14.182681456Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzdxmolz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182651539Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-node-tuner, redpanda_id=ciddv0m5utor16capvn0" t=2024-05-29T13:44:14.182635367Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzcottia-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182591618Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.182388048Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.645363ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzcottia-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182469797Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzcottia-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182439577Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-labeler, redpanda_id=cpbhvv274ocl436p6lb0" t=2024-05-29T13:44:14.182451921Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-labeler, redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:14.182382059Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-labeler, redpanda_id=cn6b1srsj6j1mdqdhil0" t=2024-05-29T13:44:14.182310909Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rzcomg63-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182286755Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-labeler, redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:14.182195032Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-labeler, redpanda_id=ciddv0m5utor16capvn0" t=2024-05-29T13:44:14.182154888Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rza85f9s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182155004Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rza85f9s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.182089303Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=redpanda-labeler, redpanda_id=ci0c2f8k30vsi89l4v1g" t=2024-05-29T13:44:14.182106039Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.181888311Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=prometheus-prometheus-node-exporter, redpanda_id=cliqutrruq7ct96cg01g" t=2024-05-29T13:44:14.18198318Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=prometheus-prometheus-node-exporter, redpanda_id=cliqutrruq7ct96cg01g" t=2024-05-29T13:44:14.181973309Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rz4slfy5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.181863061Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.181863623Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rz4slfy5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18182769Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rz4slfy5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18180464Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=447807 slug=dreamcorp t=2024-05-29T13:44:14.181626872Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.074653ms
+level=debug ts=2024-05-29T13:44:14.181636858Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node-windows, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.181715761Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node-windows, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.181668174Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node-windows, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.181660885Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ryy60qr9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.181579348Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.181564311Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node-windows, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.181456853Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ryvgjlh5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.181379376Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ryp5pgtc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.181297565Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.181206675Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node-windows, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.181219856Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.181083715Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rye6nqz3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.181085893Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rye6nqz3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.181061812Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rye6nqz3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180980171Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node-windows, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.181004341Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ryc9mv76-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180909911Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ryc9mv76-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18088034Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.180842999Z caller=remote_instance_store.go:51 user=707607 slug=obi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ryc9mv76-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.18079633Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ry7qqodi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180724229Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.1807281Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.18068337Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=707607 slug=obi instance="QueueName=srp-raster-store-adapter-store-DE695-v1-dlq" t=2024-05-29T13:44:14.180690646Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.180595237Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ry7qqodi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180531327Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxw961wr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180490806Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.180560461Z caller=remote_instance_store.go:51 user=523906 slug=cyberark msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.180423323Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.180306285Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.08031ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxw961wr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180283984Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.180288214Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.180191389Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.180179755Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.180048137Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxvoxt35-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180118552Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.180119136Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.180110697Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxvoxt35-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.180045282Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxvbhcuj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179937491Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.179963681Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.179865347Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxuc3di4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179754849Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.179716846Z caller=remote_instance_store.go:51 user=776563 slug=eagleeye4els msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.179730319Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.179721064Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=pdcsi-node, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.179668973Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxuc3di4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179585227Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.179556171Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxrxlypa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179549407Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxrxlypa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179510566Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-ubuntu, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.179409846Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.17928311Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxlklll7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179356715Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxlklll7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179333584Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-ubuntu, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.179325886Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.17927664Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.179255982Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxh5q5g8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179226273Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.17920041Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxh5q5g8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179196803Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-ubuntu, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.179235824Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.179188037Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-ubuntu, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.179191503Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxh5q5g8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179144612Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.179019926Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxd76emj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.179045671Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.179008837Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxd76emj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178968441Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.17896428Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=642786 slug=sophoscomnsg t=2024-05-29T13:44:14.178857088Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxbg0ej8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17894095Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.178926099Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.178857507Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxa7dw47-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178741488Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rxa7dw47-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178715568Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:14.17875249Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Newrelic Hybris Apdex score Frontend alert"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.178710314Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.178664934Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.178618377Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.178570355Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rx26pp5s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178470315Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.178526134Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-small-cos, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.178472873Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=472647 slug=planet instance= t=2024-05-29T13:44:14.178363807Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.178362334Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.178317218Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=472647 slug=planet version=5 fingerprint=16b1a989290d4ad4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.178207903Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc02eb35710} C:{Var:C Labels: Value:0xc02eb35718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.177942994s EvaluationString:[ var='B' labels={} value=0 ], [ var='C' labels={} value=0 ]}]" duration=20.877889ms
+logger=ngalert.state.manager.persist user=746168 slug=dbeck121 t=2024-05-29T13:44:14.178211006Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.17827365Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwzb9hoo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178211703Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=746168 slug=dbeck121 instance="__name__=temperature:outliers, city=Zadar, ml_algorithm=dbscan, ml_forecast=outlier, ml_job_metric=temperature, ml_job_name=TEmperature" t=2024-05-29T13:44:14.178165865Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwzb9hoo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178181302Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.17818943Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.178149212Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.178179937Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=746168 slug=dbeck121 instance="__name__=temperature:outliers, city=Schkeuditz, ml_algorithm=dbscan, ml_forecast=outlier, ml_job_metric=temperature, ml_job_name=TEmperature" t=2024-05-29T13:44:14.178136945Z
level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.178094552Z caller=remote_alert_sender.go:94 user=874970 slug=nvidia host=nvidia-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.11.236.247:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cdkul1z5iq7swf alerts=1 + logger=ngalert.state.manager user=746168 slug=dbeck121 instance="__name__=temperature:outliers, city=Schkeuditz, ml_algorithm=dbscan, ml_forecast=outlier, ml_job_metric=temperature, ml_job_name=TEmperature" t=2024-05-29T13:44:14.178123324Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwy61h0j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178070131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwy61h0j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.178045131Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.178135406Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=746168 slug=dbeck121 instance="__name__=temperature:outliers, city=Köln, ml_algorithm=dbscan, ml_forecast=outlier, ml_job_metric=temperature, ml_job_name=TEmperature" t=2024-05-29T13:44:14.178069052Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.177984293Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.177957506Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.177880389Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwxmxkb1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177810349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.177842348Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-ubuntu, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.177834799Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.177711765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.177667039Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.177617427Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwuiqcyz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177509786Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwuiqcyz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177473235Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwlx4zm0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177373154Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.177411214Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.177404578Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.177366737Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.177315471Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwky00z9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177211753Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwky00z9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177161272Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.177185115Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwky00z9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177131272Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwkfkm99-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.177074151Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.17712238Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.17705944Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-medium-cos, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.176992724Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwija61i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176868899Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.176894354Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-rwija61i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176807128Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwija61i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176770838Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.176832801Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.176763407Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.176653855Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.176574284Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwg9af6q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176600426Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.176633566Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.176561825Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.176563817Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=prod-v2, Consumer Group=journal-processor, Topic=processing.journal-postgres.v1" t=2024-05-29T13:44:14.176543015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.176462832Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.751534ms + level=debug ts=2024-05-29T13:44:14.176446352Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwf44rtq-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176429305Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwf44rtq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176399584Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwf44rtq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176349464Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rwf44rtq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176327263Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rw5oxg3s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176295333Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.176250172Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.176353145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.176288044Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.176145684Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rw4o9azl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.176130321Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.176093949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.176066231Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.176002569Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.175980891Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rvndmktg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175934989Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rvndmktg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175878679Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.175854104Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rvndmktg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175801828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-ubuntu, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.17577225Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rvg8livh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175726557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-rvg8livh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175598146Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-cos, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.175616249Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rvg8livh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175563666Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rv5l7j6v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175485765Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-cos, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.175542356Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rv5l7j6v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175443174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rv5l7j6v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175407824Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rv5l7j6v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175365194Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance= t=2024-05-29T13:44:14.175474375Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=937416 
slug=cambridgeuniversitypress instance= t=2024-05-29T13:44:14.175466665Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:14.175296277Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rv2ezu26-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175287843Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-cos, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.175342473Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.17526883Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rv2ezu26-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175198582Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rv2ezu26-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175173942Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.175170559Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruw742ue-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.175117911Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruw742ue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17505263Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-cos, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.175037229Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruw742ue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17504316Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.17498299Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=430961 slug=solifi version=2 fingerprint=3317ec9a13a60b07 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.174929718Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.174645208s EvaluationString:}]" duration=184.441235ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-cos, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.174970578Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:14.174938464Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.000157ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruu9k6qd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174937639Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruu9k6qd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174907279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruu9k6qd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174883169Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.174723566Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.174826047Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.17470768Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="daemonset=nvidia-gpu-device-plugin-large-cos, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.174759798Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.174743099Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.174663323Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rut66692-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174620166Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:14.174531751Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=432323 slug=lithic version=9 fingerprint=6be3d684bdf3d81b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.174397852Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.173998445s EvaluationString:}]" duration=166.506738ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nvidia-gpu-device-plugin-large-cos, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.174581119Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.174438712Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruqnveyv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174379123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.174373549Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruqnveyv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174269572Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=807262 slug=unaxisprod instance="__name__=instance_job_severity:probe_success:mean5m, alert_sensitivity=high, instance=https://unicalinkprod.axis.now.hclsoftware.cloud/login, job=NR Browser Link-App Home Page" t=2024-05-29T13:44:14.174309952Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rumccxn8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174131881Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rumccxn8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.174110201Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.174114477Z caller=remote_instance_store.go:51 user=451223 slug=amadeuspfptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rumccxn8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17405743Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=654076 slug=peerbr t=2024-05-29T13:44:14.173962095Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.174035795Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ruflx45a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173958219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.173967782Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.173889077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.173879164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=654076 slug=peerbr t=2024-05-29T13:44:14.173859434Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-rudyuaeq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173815428Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rudyuaeq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173759897Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.173741579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rudyuaeq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173731237Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rudyuaeq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173720547Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.173729967Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rucvqn0h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173688496Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rucvqn0h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173662936Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=416741 slug=despread t=2024-05-29T13:44:14.173541492Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=netd, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.173584582Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=416741 slug=despread instance= t=2024-05-29T13:44:14.173528648Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.173524097Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rubvhzj9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173435094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=763376 slug=f5nginxone t=2024-05-29T13:44:14.173310203Z level=debug msg="Saving alert states done" count=13 max_state_save_concurrency=1 duration=168.995606ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.173435778Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ru7y1ejx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173224702Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.173206394Z caller=remote_instance_store.go:51 user=739013 slug=altoglobalsharing msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ru7y1ejx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.173138371Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=739013 slug=altoglobalsharing t=2024-05-29T13:44:14.173163283Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.17315646Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.173044997Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=739013 slug=altoglobalsharing version=62 fingerprint=378a137e597c72f9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.17302037Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=172.17.0.1:9100, job=node_venco_rjh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=172.17.0.1:9100, job=node_venco_rjh Value:0xc0084ed570} C:{Var:C Labels:instance=172.17.0.1:9100, job=node_venco_rjh Value:0xc0084ed5a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.172694398s 
EvaluationString:[ var='A' labels={instance=172.17.0.1:9100, job=node_venco_rjh} value=53.49382714388153 ], [ var='C' labels={instance=172.17.0.1:9100, job=node_venco_rjh} value=0 ]}]" duration=9.521213ms
+logger=ngalert.state.manager user=171550 slug=selfchill t=2024-05-29T13:44:14.17304045Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.173039654Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=237629 slug=ocrolus version=141 fingerprint=a19b643fc5459c56 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.172951239Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C0:{Var:C Labels:__name__=debezium_metrics_snapshotrunning, app=kafka-connect-detect-ocrolus, cluster=production01, context=snapshot, environment=prod, instance=10.160.127.222:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-detect-ocrolus-65546956ff-lb59b, name=detect, plugin=postgres, pod_template_hash=65546956ff Value:0xc0182fb1d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.172590393s EvaluationString:[ var='C0' metric='debezium_metrics_snapshotrunning' labels={__name__=debezium_metrics_snapshotrunning, app=kafka-connect-detect-ocrolus, cluster=production01, context=snapshot, environment=prod, instance=10.160.127.222:9103, job=kubernetes-pods, kubernetes_namespace=prod, kubernetes_pod_name=kafka-connect-detect-ocrolus-65546956ff-lb59b, name=detect, plugin=postgres, pod_template_hash=65546956ff} value=0 ]}]" duration=68.553909ms
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.173053884Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.173008635Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.172982134Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.172996907Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ru2ecxxu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.172939889Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.172845064Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=20177 slug=paddledash instance="DBInstanceIdentifier=paddle-core-aurora-read-app" t=2024-05-29T13:44:14.172784916Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.172822012Z caller=remote_instance_store.go:51 user=417450 slug=legitsecurity msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.17278121Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtzsjhki-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.172692016Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtzsjhki-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.172574715Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtvdg08l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.172517054Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=nccl-fastsocket-installer, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.172693684Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.172589293Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.172532761Z caller=remote_instance_store.go:51 user=467258 slug=neonprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.172304962Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=467258 slug=neonprod instance="datasource_uid=HUNg6jvVk, ref_id=A" t=2024-05-29T13:44:14.172459026Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.172446863Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=467258 slug=neonprod version=7 fingerprint=5d45eadaaa52fc40 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.172360388Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=HUNg6jvVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.172028215s EvaluationString:}]" duration=114.885769ms
+level=debug ts=2024-05-29T13:44:14.172299378Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.172287237Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.172078017Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.172177112Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.172166647Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.172118349Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.17197847Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.172107614Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtswu43u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17204006Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.17203815Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtsv63ni-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171939158Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.17189529Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.171807889Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtsv63ni-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171886238Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.17182394Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.171734127Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtraqbnt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171780667Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.171654096Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.171571932Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=metadata-proxy-v0.1, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.171516417Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.171455786Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=233137 slug=mirrornode t=2024-05-29T13:44:14.171436012Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=233137 slug=mirrornode instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.171406756Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=logging-fluent-bit, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.171440478Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtjte9xf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171421633Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtjte9xf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171361513Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.171326268Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=233137 slug=mirrornode version=33 fingerprint=b6a31b741fdba988 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.171277642Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.170935348s EvaluationString:}]" duration=12.994817ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtb1d9zc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171255991Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=logging-fluent-bit" t=2024-05-29T13:44:14.171234012Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rtb1d9zc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171203741Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rt55assx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171175561Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rt55assx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17112312Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rt55assx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17109428Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.171069923Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.171077926Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.171019187Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.171070625Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.171009581Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rt4c1as6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.171001339Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rt4c1as6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170979319Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rt4c1as6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170926548Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rt4c1as6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170886568Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsybd1xy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170827357Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.170818978Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsybd1xy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170735566Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsybd1xy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170681726Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.170702396Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsv38ycf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170538164Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsv38ycf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170500424Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsorpu4v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170448593Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsorpu4v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170425773Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsorpu4v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170392983Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsk8k7f1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170280361Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsk8k7f1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.170194391Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cpbhvv274ocl436p6lb0" t=2024-05-29T13:44:14.170259374Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsk8k7f1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.17016472Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:14.170196322Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:14.170194116Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:14.17018453Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:14.170166812Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:14.170159493Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:14.17013232Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cn6b1srsj6j1mdqdhil0" t=2024-05-29T13:44:14.170134243Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cn6b1srsj6j1mdqdhil0" t=2024-05-29T13:44:14.170123127Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cliqutrruq7ct96cg01g" t=2024-05-29T13:44:14.16998119Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsjhw9oc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169975228Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsjhw9oc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169921248Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.169893064Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rshfn2z6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169880497Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rshfn2z6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169859577Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rshfn2z6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169831417Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsfoayu6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169719826Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsfoayu6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169680915Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rsfoayu6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169630395Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rse0pzh9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169602265Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=ciddv0m5utor16capvn0" t=2024-05-29T13:44:14.169728277Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.169672353Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-proxy, redpanda_id=ci0c2f8k30vsi89l4v1g" t=2024-05-29T13:44:14.169604033Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.169495582Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rs97zxrr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169446143Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=310637 slug=notino instance= t=2024-05-29T13:44:14.169416112Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.169459647Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.169393631Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=d3394d98914a36ec attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.169313787Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.168972822s EvaluationString:}]" duration=27.81131ms
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.169352402Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rs6jcvc4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169290501Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rs6jcvc4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169281171Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.169263866Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.169202776Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.19148ms
+level=debug ts=2024-05-29T13:44:14.169206528Z caller=remote_instance_store.go:51 user=118359 slug=atixlabs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rs0n9afx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.169077039Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.168973112Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/conversions/{proxy+}, Stage=--" t=2024-05-29T13:44:14.169105846Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=118359 slug=atixlabs instance="datasource_uid=Td9y1QRGk, ref_id=B" t=2024-05-29T13:44:14.169125848Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=118359 slug=atixlabs instance="datasource_uid=Td9y1QRGk, ref_id=B" t=2024-05-29T13:44:14.169111702Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.169046881Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrzu8c44-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168935718Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrzu8c44-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168913447Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:14.168939335Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.168854766Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.168696493Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.168692933Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.168692813Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.168774861Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rruqkkgk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168709975Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rruqkkgk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168684385Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cpbi43f37uvmolr3jab0" t=2024-05-29T13:44:14.168709932Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrufw09i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168618134Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cpbi30274ocl436p6ma0" t=2024-05-29T13:44:14.168642672Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrufw09i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168555554Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.168562539Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.168416742Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:14.168388784Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrpckucz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168267111Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.168270628Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=kube-prometheus-stack-prometheus-node-exporter, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.168265522Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.168147261Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrpckucz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16815606Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrpckucz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168126259Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrpbjg7h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.168066419Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.168033602Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrnw2fwl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.167892477Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrnw2fwl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.167823306Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.167859246Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=874970 slug=nvidia t=2024-05-29T13:44:14.167662085Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrnw2fwl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.167743495Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.167774662Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.167748057Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrh727h0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.167649685Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.167788359Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.16777674Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.167603496Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.167691225Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.167679502Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.167586706Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrgp82xl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.167433132Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.167506516Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.167436057Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.167352157Z caller=remote_instance_store.go:51 user=87780 slug=zencloudandhosting msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.167314793Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rrehn7dc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.167276191Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr9luxsf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1672259Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=host-tuner, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.167237819Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr9luxsf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16717378Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=hold-snap-refresh, redpanda_id=cpbi30274ocl436p6ma0" t=2024-05-29T13:44:14.167108483Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=hold-snap-refresh, redpanda_id=cpbi30274ocl436p6ma0" t=2024-05-29T13:44:14.167096939Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.166999273Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=hold-snap-refresh, redpanda_id=cpbi0fq74ocl436p6lfg" t=2024-05-29T13:44:14.167025343Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.166961827Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:14.166974284Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:14.166946443Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=679831 slug=joveostageaws t=2024-05-29T13:44:14.166933316Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr5c4zst-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.166898607Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=hold-snap-refresh, redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:14.16687372Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr5c4zst-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.166875057Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr5c4zst-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.166738935Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.166768693Z caller=remote_instance_store.go:51 user=846513 slug=npc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=846513 slug=npc instance="check_url=https://my.pingdom.com/app/reports/uptime?swicus_org_id=31478037680543744#check=8684048, id=8684048, name=myAvatar - Hawaii, State of - PROD - (14545)" t=2024-05-29T13:44:14.166672102Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=hold-snap-refresh, redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:14.166617788Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.166476789Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.166424744Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=846513 slug=npc instance="check_url=https://my.pingdom.com/app/reports/uptime?swicus_org_id=31478037680543744#check=1767685, id=1767685, name=Avatar CWS - Ventura County Behavioral Health" t=2024-05-29T13:44:14.16650359Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=846513 slug=npc instance="check_url=https://my.pingdom.com/app/reports/uptime?swicus_org_id=31478037680543744#check=1767685, id=1767685, name=Avatar CWS - Ventura County Behavioral Health" t=2024-05-29T13:44:14.16649154Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr38ulqa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.166486513Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.166415982Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr38ulqa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.166463782Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr38ulqa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.166433362Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr38ulqa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.166411922Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=846513 slug=npc instance="check_url=https://my.pingdom.com/app/reports/uptime?swicus_org_id=31478037680543744#check=12885016, id=12885016, name=myAvatarNX - Outreach Project - PROD - (12709)" t=2024-05-29T13:44:14.166403749Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=hold-snap-refresh, redpanda_id=ci0c2f8k30vsi89l4v1g" t=2024-05-29T13:44:14.166435956Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.16628482Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr2vd6hp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16625369Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=846513 slug=npc instance="check_url=https://my.pingdom.com/app/reports/uptime?swicus_org_id=31478037680543744#check=12724207, id=12724207, name=myAvatarNX - County of Los Angeles DPH-SAPC - PROD - (16276)" t=2024-05-29T13:44:14.166118715Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=846513 slug=npc instance="check_url=https://my.pingdom.com/app/reports/uptime?swicus_org_id=31478037680543744#check=12724207, id=12724207, name=myAvatarNX - County of Los Angeles DPH-SAPC - PROD - (16276)" t=2024-05-29T13:44:14.166098324Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=846513 slug=npc instance="check_url=https://my.pingdom.com/app/reports/uptime?swicus_org_id=31478037680543744#check=12724161, id=12724161, name=myAvatarNX - West Alabama Mental Health Ctr - PROD - (91264)" t=2024-05-29T13:44:14.166005344Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.165921589Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.022991ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rr0s5srw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.165936467Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=gke-metadata-server, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.165893825Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=gke-metadata-server, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.165880138Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=467357 slug=peturs05 t=2024-05-29T13:44:14.165477974Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.441314ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqymd0c5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.165730825Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.16572734Z caller=remote_instance_store.go:51 user=335419 slug=tbauctions msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.165559199Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqymd0c5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.165642714Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=grafanacloud-logs, ref_id=Check Alerts" t=2024-05-29T13:44:14.165588368Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqymd0c5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.165598273Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:14.165414823Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=19.462776ms
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=gke-metadata-server, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.165498533Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.165440833Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.165395849Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.165369652Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=335419 slug=tbauctions version=18 fingerprint=119980c4a3bd1260 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.165282159Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=Check Alerts State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.164881303s EvaluationString:}]" duration=43.395529ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqwfq5ky-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.165346871Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqwfq5ky-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.165316051Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:14.165293221Z level=debug msg="Saving alert states done" count=11 max_state_save_concurrency=1 duration=183.384727ms
+level=debug ts=2024-05-29T13:44:14.165229317Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=gke-metadata-server, redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:14.165238617Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=776563 slug=eagleeye4els instance="city=Tokyo" t=2024-05-29T13:44:14.16517547Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=776563 slug=eagleeye4els instance="city=Shanghai" t=2024-05-29T13:44:14.165120429Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=gke-metadata-server, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.165135968Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqqo22q2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.165143269Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=776563 slug=eagleeye4els instance="city=London" t=2024-05-29T13:44:14.165102699Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=776563 slug=eagleeye4els instance="city=Beijing" t=2024-05-29T13:44:14.165071358Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluentbit-control-cluster-fluent-bit" t=2024-05-29T13:44:14.165059977Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=776563 slug=eagleeye4els version=44 fingerprint=d9b1323182f3dc74 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.164921885Z level=debug msg="Alert rule evaluated" results="[{Instance:city=Beijing State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:city=Beijing Value:0xc0080ab028} B:{Var:B Labels:city=Beijing Value:0xc0080ab040} C:{Var:C Labels:city=Beijing Value:0xc0080ab010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.164332111s EvaluationString:[ var='A' labels={city=Beijing} value=100 ], [ var='B' labels={city=Beijing} value=100 ], [ var='C' labels={city=Beijing} value=0 ]} {Instance:city=London State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:city=London Value:0xc0080ab080} B:{Var:B Labels:city=London Value:0xc0080ab098} C:{Var:C Labels:city=London Value:0xc0080ab068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.164352451s EvaluationString:[ var='A' labels={city=London} value=100 ], [ var='B' labels={city=London} value=100 ], [ var='C' labels={city=London} value=0 ]} {Instance:city=Shanghai State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:city=Shanghai Value:0xc0080ab0e0} B:{Var:B Labels:city=Shanghai Value:0xc0080ab0f8} C:{Var:C Labels:city=Shanghai Value:0xc0080ab0c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.164377511s EvaluationString:[ var='A' labels={city=Shanghai} value=100 ], [ var='B' labels={city=Shanghai} value=100 ], [ var='C' labels={city=Shanghai} value=0 ]} {Instance:city=Shenzhen State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:city=Shenzhen Value:0xc0080ab128} B:{Var:B Labels:city=Shenzhen Value:0xc0080ab140} C:{Var:C Labels:city=Shenzhen Value:0xc0080ab158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.164390212s EvaluationString:[ var='A' labels={city=Shenzhen} value=100 ], [ var='B' labels={city=Shenzhen} value=100 ], [ var='C' labels={city=Shenzhen} value=0 ]} {Instance:city=Tokyo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:city=Tokyo Value:0xc0080ab180} B:{Var:B Labels:city=Tokyo Value:0xc0080ab198} C:{Var:C Labels:city=Tokyo Value:0xc0080ab1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.164400412s EvaluationString:[ var='A' labels={city=Tokyo} value=100 ], [ var='B' labels={city=Tokyo} value=100 ], [ var='C' labels={city=Tokyo} value=0 ]} {Instance:city=Virginia State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:city=Virginia Value:0xc0080ab1e0} B:{Var:B Labels:city=Virginia Value:0xc0080ab1f8} C:{Var:C Labels:city=Virginia Value:0xc0080ab210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.164411052s EvaluationString:[ var='A' labels={city=Virginia} value=100 ], [ var='B' labels={city=Virginia} value=100 ], [ var='C' labels={city=Virginia} value=0 ]}]" duration=380.162845ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqnroqtt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164974057Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqkrwnpq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164942647Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.164856697Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqdtfmyl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164779435Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqdtfmyl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164751195Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.164726188Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.164716178Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqde0i1g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164625134Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.164644119Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.164612721Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqde0i1g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164596543Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.164602826Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.164624488Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.164551928Z caller=grafana.go:247 user=811513 slug=inpowertech msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=5 alerts=0 + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.164594267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.164612438Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rqde0i1g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164532493Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.164543805Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.164434097Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbiara74ocl436p6nv0" t=2024-05-29T13:44:14.164445569Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rq2f71ka-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164414141Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.164350389Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.164355382Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rq2f71ka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164350381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rq2f71ka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164340281Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.164213096Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rq22u7bg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16428752Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.164217861Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbiaif37uvmolr3jal0" t=2024-05-29T13:44:14.164244325Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rq22u7bg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164183209Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.164158911Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rq19suvq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164131668Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rq19suvq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.164083038Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=178459 slug=usemodernlogic instance= t=2024-05-29T13:44:14.164033858Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.164092074Z caller=remote_instance_store.go:51 user=178459 slug=usemodernlogic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=178459 slug=usemodernlogic instance= t=2024-05-29T13:44:14.164026221Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rpuoashq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.163980137Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.163975956Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=178459 slug=usemodernlogic version=1 fingerprint=886527ed3f1b239e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.163942403Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.163686268s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=193.391973ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rpuoashq-termination-metadata-pv, 
phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.163952667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rpuoashq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.163912486Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:14.163957373Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.163812369Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.163844716Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:14.163747218Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.163593106Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.163609627Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=cjk99o0a3v32vef0rqtg" t=2024-05-29T13:44:14.163599538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rpn9hnh7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.163557323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rpn9hnh7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.163462102Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:14.16353924Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=fluent-bit, redpanda_id=ciddv0m5utor16capvn0" t=2024-05-29T13:44:14.163434693Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.16303784Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-roxidfza-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162986517Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=ebs-csi-node, redpanda_id=cn6b1srsj6j1mdqdhil0" t=2024-05-29T13:44:14.163016387Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-roxidfza-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162924576Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.162879473Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=ebs-csi-node, redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:14.162880469Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rowpcs4p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162652143Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rowd71jk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162623203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=ebs-csi-node, redpanda_id=ciddv0m5utor16capvn0" t=2024-05-29T13:44:14.162799613Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=ebs-csi-node, redpanda_id=ci0c2f8k30vsi89l4v1g" t=2024-05-29T13:44:14.162744642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=ebs-csi-node, redpanda_id=ci0c2f8k30vsi89l4v1g" t=2024-05-29T13:44:14.162734956Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=ebs-csi-node" t=2024-05-29T13:44:14.162664371Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.1624654Z caller=remote_instance_store.go:51 user=297794 slug=leanix 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rowd71jk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162550142Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=csi-secrets-store-secrets-store-csi-driver" t=2024-05-29T13:44:14.162595105Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rowd71jk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162497342Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=csi-secrets-store-provider-aws" t=2024-05-29T13:44:14.162535015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=csi-secrets-store-provider-aws" t=2024-05-29T13:44:14.162524677Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbigqf37uvmolr3jb7g" t=2024-05-29T13:44:14.162467816Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rovpc026-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16237693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbifef37uvmolr3jb50" t=2024-05-29T13:44:14.162404491Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rovpc026-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16235517Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.162342418Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-rokmqafu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16232556Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbiee737uvmolr3jb1g" t=2024-05-29T13:44:14.162331798Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.162274806Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.901054ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rokmqafu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1623022Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rokmqafu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162274839Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbidrn37uvmolr3jb0g" t=2024-05-29T13:44:14.162275309Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.162160071Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.162169677Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-roh8nhkb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162169718Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbibci74ocl436p6o30" t=2024-05-29T13:44:14.162200123Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.162121749Z caller=remote_instance_store.go:51 user=532553 slug=jithins msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-roh8nhkb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.162027417Z level=debug msg="Keeping state" state=Normal + level=debug 
ts=2024-05-29T13:44:14.162033844Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbiao737uvmolr3jan0" t=2024-05-29T13:44:14.162063776Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rogdfp9s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.161952216Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=532553 slug=jithins instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.161959486Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info ts=2024-05-29T13:44:14.161980533Z caller=remote_alert_sender.go:94 user=656459 slug=activeport host=activeport-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.20.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f6608375-cfd0-499c-a973-c05aa69d1366 alerts=1 + logger=ngalert.state.manager user=532553 slug=jithins instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.161930584Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=collector, redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:14.161934047Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=532553 slug=jithins instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.161921853Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="daemonset=aws-secrets-store-secrets-store-csi-driver-provider-aws" t=2024-05-29T13:44:14.161704674Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.161668007Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-roahonq8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.161625823Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=447897 slug=mysten t=2024-05-29T13:44:14.161629251Z level=debug msg="Saving alert states" count=39 max_state_save_concurrency=1 + logger=ngalert.state.manager user=447897 slug=mysten instance="route=unsafe_moveCall" t=2024-05-29T13:44:14.161601583Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_resolveNameServiceNames" t=2024-05-29T13:44:14.161568171Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_resolveNameServiceAddress" 
t=2024-05-29T13:44:14.161544878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ro8m06r9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.161524222Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ro8m06r9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16142068Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_queryEvents" t=2024-05-29T13:44:14.161468495Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ro8m06r9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16139913Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getStakes" t=2024-05-29T13:44:14.161363042Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rnzzgnqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.16134431Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rnzzgnqr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.161240169Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rnhkmgw8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.161212788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getLatestBridge" 
t=2024-05-29T13:44:14.161207355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getDynamicFields" t=2024-05-29T13:44:14.161152743Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getDynamicFieldObject" t=2024-05-29T13:44:14.161131596Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getCommitteeInfo" t=2024-05-29T13:44:14.16110337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rnhkmgw8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.161075797Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getCoins" t=2024-05-29T13:44:14.161074746Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rndv9hb5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.161017316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getBalance" t=2024-05-29T13:44:14.161005666Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rndv9hb5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160956546Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.160878736Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getAllBalances" t=2024-05-29T13:44:14.160952034Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=suix_getAllBalances" t=2024-05-29T13:44:14.160910605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rndv9hb5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160925515Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.160819606Z 
caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rncdbddv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160846855Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_tryMultiGetPastObjects" t=2024-05-29T13:44:14.160877986Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_multiGetTransactionBlocks" t=2024-05-29T13:44:14.160851133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rncdbddv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160795534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rncdbddv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160784644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rncdbddv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160754934Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_getProtocolConfig" t=2024-05-29T13:44:14.160743787Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_getObject" t=2024-05-29T13:44:14.160704144Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rn5ipeka-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160685803Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rn5ipeka-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160635082Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rn5ipeka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160606912Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rn3h3cja-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160506791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_getCheckpoints" t=2024-05-29T13:44:14.160527023Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_getCheckpoint" t=2024-05-29T13:44:14.160496066Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_getCheckpoint" t=2024-05-29T13:44:14.160486043Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_getChainIdentifier" t=2024-05-29T13:44:14.160467103Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=sui_dryRunTransactionBlock" t=2024-05-29T13:44:14.160429776Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=447897 slug=mysten instance="route=rpc.discover" t=2024-05-29T13:44:14.160354229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmz6vsj5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160352149Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmz6vsj5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160312559Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmz6vsj5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160290769Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.160264001Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmysjwl0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160261498Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmysjwl0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.160156447Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.159976338Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=e841bba69dd51233 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.159870867Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=diff, name=SINGLEPLAYER XBSX TRIAL Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0aeea5430} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc0aeea5438} Threshold:{Var:Threshold Labels: Value:0xc0aeea5370} compare:{Var:compare Labels:aggregatedBy=diff, name=SINGLEPLAYER XBSX TRIAL Query Value:0xc0aeea53c0} sum:{Var:sum Labels:aggregatedBy=diff, name=SINGLEPLAYER XBSX TRIAL Query Value:0xc0aeea5408}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.159579129s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=2000 ], [ var='Threshold' labels={} value=-2.5 ], [ var='compare' labels={aggregatedBy=diff, name=SINGLEPLAYER XBSX TRIAL Query} value=0 ], [ var='sum' labels={aggregatedBy=diff, name=SINGLEPLAYER XBSX TRIAL Query} value=0 ]}]" duration=72.411205ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmsbm6a3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159872004Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmsbm6a3-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159842284Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.159819424Z caller=remote_instance_store.go:51 user=855643 slug=datable msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusprodref, client=wellaltus, env=prod, groupname=sshd, host=wellaltusprodref, instance=wellaltusprodref, job=integrations/process_exporter, type=ref" t=2024-05-29T13:44:14.159779887Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.159682714Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmloei2k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159789534Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query" t=2024-05-29T13:44:14.159664294Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmloei2k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159716603Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusnonprodref, client=wellaltus, env=nonprod, groupname=sshd, host=wellaltusnonprodref, instance=wellaltusnonprodref, job=integrations/process_exporter, type=ref" t=2024-05-29T13:44:14.159694036Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmloei2k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159683972Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmlifcmn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159616222Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.159632487Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmlifcmn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159594282Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsproxymaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsproxymaitre:9090, job=integrations/process_exporter, site=aws" t=2024-05-29T13:44:14.159591804Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.159556981Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.159559879Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsproxymaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsproxymaitre:9090, job=integrations/process_exporter, site=aws" t=2024-05-29T13:44:14.159577969Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmlifcmn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159542641Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.159451162Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.159553099Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ level=debug ts=2024-05-29T13:44:14.159483602Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmjcau6p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15944867Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmjcau6p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15941102Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=c4a0688e2faabc0d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.159428469Z level=error msg="Failed to evaluate rule" error="failed to build query 'F': data source not found" duration=4.62778ms
+ logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.15939145Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:14.159377953Z level=debug msg="Keeping state" state=Normal
+ level=error ts=2024-05-29T13:44:14.159396134Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'F': data source not found"
+ level=debug ts=2024-05-29T13:44:14.159373894Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.159353291Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.15935248Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsapp1, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsapp1:9090, job=integrations/process_exporter, site=aws" t=2024-05-29T13:44:14.159383136Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmjcau6p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159356449Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmjcau6p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159346939Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmbcqzpa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159297448Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rmbcqzpa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.159269758Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.159316676Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.159261389Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.159075645Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rm948qqx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.158972005Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.158992374Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawswinvmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawswinvmaitre:9090, job=integrations/process_exporter, site=aws" t=2024-05-29T13:44:14.159033434Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.158957862Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.158832566Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158809128Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:14.158812189Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:14.158794464Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158740327Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158730327Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158709327Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158677126Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rm803305-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.158675442Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rm0phqgg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.158622521Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158647026Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158633126Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsproxymaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsproxymaitre:9090, job=integrations/process_exporter, site=aws" t=2024-05-29T13:44:14.15860392Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158587325Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158577125Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.158540499Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158514924Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rm0phqgg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15849181Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:14.158471923Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsapp1, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsapp1:9090, job=integrations/process_exporter, site=aws" t=2024-05-29T13:44:14.158391513Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=543604 slug=kingmakers version=2 fingerprint=b04054f36f68a480 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.158323621Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.158925197s EvaluationString:}]" duration=123.552993ms
+ logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:14.15834676Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VPROD019.GoAnywhere A" t=2024-05-29T13:44:14.158326805Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlxv8i1t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.158256758Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.158177497Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.158228578Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlxrdoxb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.158180407Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.158179471Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.443539ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlxrdoxb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.158066606Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinnonprodref, client=nbin, env=nonprod, groupname=sshd, host=nbinnonprodref, instance=nbinnonprodref, job=integrations/process_exporter, type=ref" t=2024-05-29T13:44:14.158124379Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.157995154Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.158093186Z caller=remote_instance_store.go:51 user=296733 slug=iotics msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.158024414Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.157945911Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=142180 slug=luxtronic t=2024-05-29T13:44:14.157953751Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.937784ms
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.ufc-2021-ps4.coreSlave*.usersessions.status.ufc-2021-ps4.GaugeUS_{b,m}*-syd_Slave,5)) Query" t=2024-05-29T13:44:14.157961785Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlw6eb9t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157893714Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.157781758Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlvyy0yu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157785823Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=dbeb5aab3bfe7ce1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.157671307Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.ufc-2021-ps4.coreSlave*.usersessions.status.ufc-2021-ps4.GaugeUS_{b,m}*-syd_Slave,5)) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc00f5d0a40} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc00f5d0a48} Threshold:{Var:Threshold Labels: Value:0xc00f5d09c0} compare:{Var:compare Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.ufc-2021-ps4.coreSlave*.usersessions.status.ufc-2021-ps4.GaugeUS_{b,m}*-syd_Slave,5)) Query Value:0xc00f5d0a00} sum:{Var:sum Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.ufc-2021-ps4.coreSlave*.usersessions.status.ufc-2021-ps4.GaugeUS_{b,m}*-syd_Slave,5)) Query Value:0xc00f5d0a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.157299909s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=25 ], [ var='Threshold' labels={} value=-30 ], [ var='compare' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.ufc-2021-ps4.coreSlave*.usersessions.status.ufc-2021-ps4.GaugeUS_{b,m}*-syd_Slave,5)) Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.ufc-2021-ps4.coreSlave*.usersessions.status.ufc-2021-ps4.GaugeUS_{b,m}*-syd_Slave,5)) Query} value=0 ]}]" duration=68.164899ms
+ logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PLAYLIVE-MESSAGE_QUEUE_GPS_CARD_MANAGEMENT-DLQ" t=2024-05-29T13:44:14.157780747Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.157641264Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=claretnonprodref, client=claret, env=nonprod, groupname=sshd, host=claretnonprodref, instance=claretnonprodref, job=integrations/process_exporter, type=ref" t=2024-05-29T13:44:14.157713451Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlvyy0yu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157723252Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=296733 slug=iotics instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.157711166Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rluxbmx7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157671702Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rluxbmx7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157649772Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=296733 slug=iotics t=2024-05-29T13:44:14.157611223Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rluxbmx7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157568611Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcnonprodref, client=cibc, env=nonprod, groupname=sshd, host=cibcnonprodref, instance=cibcnonprodref, job=integrations/process_exporter, type=ref" t=2024-05-29T13:44:14.157604394Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlssv0rj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15752892Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.157470716Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlssv0rj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15750595Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=171897 slug=croesus instance="__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2bdmaitre, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2bdmaitre:9090, job=integrations/process_exporter, site=aws" t=2024-05-29T13:44:14.157503397Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlssv0rj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157447299Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlssv0rj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157394739Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlpsgcau-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157292058Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=171897 slug=croesus version=7 fingerprint=a60b32a635ddd6c1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.156796724Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2app1, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2app1:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2app1, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2app1:9090, job=integrations/process_exporter, site=aws Value:0xc0160f58f0} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2app1, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2app1:9090, job=integrations/process_exporter, site=aws Value:0xc0160f5958}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.154975022s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2app1, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2app1:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2app1, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2app1:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2bdmaitre, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2bdmaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2bdmaitre, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2bdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc0160f5a88} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2bdmaitre, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2bdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc0160f5a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.154992591s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2bdmaitre, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2bdmaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcisiuat2crm2bdmaitre, client=cibcisi, env=nonprod, groupname=sshd, instance=cibcisiuat2crm2bdmaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcnonprodref, client=cibc, env=nonprod, groupname=sshd, host=cibcnonprodref, instance=cibcnonprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcnonprodref, client=cibc, env=nonprod, groupname=sshd, host=cibcnonprodref, instance=cibcnonprodref, job=integrations/process_exporter, type=ref Value:0xc0160f5ba8} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcnonprodref, client=cibc, env=nonprod, groupname=sshd, host=cibcnonprodref, instance=cibcnonprodref, job=integrations/process_exporter, type=ref Value:0xc0160f5c70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.154999737s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcnonprodref, client=cibc, env=nonprod, groupname=sshd, host=cibcnonprodref, instance=cibcnonprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=cibcnonprodref, client=cibc, env=nonprod, groupname=sshd, host=cibcnonprodref, instance=cibcnonprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=claretnonprodref, client=claret, env=nonprod, groupname=sshd, host=claretnonprodref, instance=claretnonprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=claretnonprodref, client=claret, env=nonprod, groupname=sshd, host=claretnonprodref, instance=claretnonprodref, job=integrations/process_exporter, type=ref Value:0xc0160f5e30} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=claretnonprodref, client=claret, env=nonprod, groupname=sshd, host=claretnonprodref, instance=claretnonprodref, job=integrations/process_exporter, type=ref Value:0xc0160f5d98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155006778s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=claretnonprodref, client=claret, env=nonprod, groupname=sshd, host=claretnonprodref, instance=claretnonprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=claretnonprodref, client=claret, env=nonprod, groupname=sshd, host=claretnonprodref, instance=claretnonprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=iagpnonprodref, client=iagp, env=nonprod, groupname=sshd, host=iagpnonprodref, instance=iagpnonprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=iagpnonprodref, client=iagp, env=nonprod, groupname=sshd, host=iagpnonprodref, instance=iagpnonprodref, job=integrations/process_exporter, type=ref Value:0xc0160f5f58} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=iagpnonprodref, client=iagp, env=nonprod, groupname=sshd, host=iagpnonprodref, instance=iagpnonprodref, job=integrations/process_exporter, type=ref Value:0xc014444000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.15501618s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=iagpnonprodref, client=iagp, env=nonprod, groupname=sshd, host=iagpnonprodref, instance=iagpnonprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=iagpnonprodref, client=iagp, env=nonprod, groupname=sshd, host=iagpnonprodref, instance=iagpnonprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinnonprodref, client=nbin, env=nonprod, groupname=sshd, host=nbinnonprodref, instance=nbinnonprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinnonprodref, client=nbin, env=nonprod, groupname=sshd, host=nbinnonprodref, instance=nbinnonprodref, job=integrations/process_exporter, type=ref Value:0xc014444118} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinnonprodref, client=nbin, env=nonprod, groupname=sshd, host=nbinnonprodref, instance=nbinnonprodref, job=integrations/process_exporter, type=ref Value:0xc0144441b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155022615s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinnonprodref, client=nbin, env=nonprod, groupname=sshd, host=nbinnonprodref, instance=nbinnonprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinnonprodref, client=nbin, env=nonprod, groupname=sshd, host=nbinnonprodref, instance=nbinnonprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinprodref, client=nbin, env=prod, groupname=sshd, host=nbinprodref, instance=nbinprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinprodref, client=nbin, env=prod, groupname=sshd, host=nbinprodref, instance=nbinprodref, job=integrations/process_exporter, type=ref Value:0xc0144442f0} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinprodref, client=nbin, env=prod, groupname=sshd, host=nbinprodref, instance=nbinprodref, job=integrations/process_exporter, type=ref Value:0xc014444380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155028388s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinprodref, client=nbin, env=prod, groupname=sshd, host=nbinprodref, instance=nbinprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=nbinprodref, client=nbin, env=prod, groupname=sshd, host=nbinprodref, instance=nbinprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsapp1, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsapp1:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsapp1, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsapp1:9090, job=integrations/process_exporter, site=aws Value:0xc014444448} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsapp1, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsapp1:9090, job=integrations/process_exporter, site=aws Value:0xc0144444b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155038512s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsapp1, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsapp1:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsapp1, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsapp1:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsbdmaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsbdmaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsbdmaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsbdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444588} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsbdmaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsbdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc0144445f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155044266s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsbdmaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsbdmaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsbdmaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsbdmaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsproxymaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsproxymaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsproxymaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsproxymaitre:9090, job=integrations/process_exporter, site=aws Value:0xc0144446b0} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsproxymaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsproxymaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155053716s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsproxymaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsproxymaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=passerelledemoawsproxymaitre, client=croesus, env=nonprod, groupname=sshd, instance=passerelledemoawsproxymaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsapp1, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsapp1:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsapp1, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsapp1:9090, job=integrations/process_exporter, site=aws Value:0xc0144447e8} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsapp1, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsapp1:9090, job=integrations/process_exporter, site=aws Value:0xc014444850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.15506028s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsapp1, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsapp1:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsapp1, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsapp1:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsbdmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsbdmaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsbdmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsbdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444920} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsbdmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsbdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155066564s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsbdmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsbdmaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsbdmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsbdmaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsproxymaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsproxymaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsproxymaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsproxymaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444aa8} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsproxymaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsproxymaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444a50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155071799s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsproxymaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsproxymaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawsproxymaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawsproxymaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawswinvmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawswinvmaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawswinvmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawswinvmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444ba8} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawswinvmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawswinvmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014444c08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155077231s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawswinvmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawswinvmaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandawswinvmaitre, client=steadyhand, env=prod, groupname=sshd, instance=steadyhandawswinvmaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandnonprodref, client=steadyhand, env=nonprod, groupname=sshd, host=steadyhandnonprodref, instance=steadyhandnonprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandnonprodref, client=steadyhand, env=nonprod, groupname=sshd, host=steadyhandnonprodref, instance=steadyhandnonprodref, job=integrations/process_exporter, type=ref Value:0xc014444d50} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandnonprodref, client=steadyhand, env=nonprod, groupname=sshd, host=steadyhandnonprodref, instance=steadyhandnonprodref, job=integrations/process_exporter, type=ref Value:0xc014444ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155097445s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandnonprodref, client=steadyhand, env=nonprod, groupname=sshd, host=steadyhandnonprodref, instance=steadyhandnonprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandnonprodref, client=steadyhand, env=nonprod, groupname=sshd, host=steadyhandnonprodref, instance=steadyhandnonprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandprodref, client=steadyhand, env=prod, groupname=sshd, host=steadyhandprodref, instance=steadyhandprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandprodref, client=steadyhand, env=prod, groupname=sshd, host=steadyhandprodref, instance=steadyhandprodref, job=integrations/process_exporter, type=ref Value:0xc014444e90} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandprodref, client=steadyhand, env=prod, groupname=sshd, host=steadyhandprodref, instance=steadyhandprodref, job=integrations/process_exporter, type=ref Value:0xc014444e30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155104823s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandprodref, client=steadyhand, env=prod, groupname=sshd, host=steadyhandprodref, instance=steadyhandprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhandprodref, client=steadyhand, env=prod, groupname=sshd, host=steadyhandprodref, instance=steadyhandprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsapp1, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsapp1:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsapp1, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsapp1:9090, job=integrations/process_exporter, site=aws Value:0xc014444f60} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsapp1, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsapp1:9090, job=integrations/process_exporter, site=aws Value:0xc014444fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155116543s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsapp1, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsapp1:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsapp1, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsapp1:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsbdmaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsbdmaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsbdmaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsbdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc0144450a8} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsbdmaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsbdmaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014445130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.15512681s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsbdmaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsbdmaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsbdmaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsbdmaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsproxymaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsproxymaitre:9090, job=integrations/process_exporter, site=aws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsproxymaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsproxymaitre:9090, job=integrations/process_exporter, site=aws Value:0xc014445220} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsproxymaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsproxymaitre:9090, job=integrations/process_exporter, site=aws Value:0xc0144452a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155134349s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsproxymaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsproxymaitre:9090, job=integrations/process_exporter, site=aws} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=steadyhanduatawsproxymaitre, client=steadyhand, env=nonprod, groupname=sshd, instance=steadyhanduatawsproxymaitre:9090, job=integrations/process_exporter, site=aws} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusnonprodref, client=wellaltus, env=nonprod, groupname=sshd, host=wellaltusnonprodref, instance=wellaltusnonprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusnonprodref, client=wellaltus, env=nonprod, groupname=sshd, host=wellaltusnonprodref, instance=wellaltusnonprodref, job=integrations/process_exporter, type=ref Value:0xc0144453a0} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusnonprodref, client=wellaltus, env=nonprod, groupname=sshd, host=wellaltusnonprodref, instance=wellaltusnonprodref, job=integrations/process_exporter, type=ref Value:0xc014445420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155143415s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusnonprodref, client=wellaltus, env=nonprod, groupname=sshd, host=wellaltusnonprodref, instance=wellaltusnonprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusnonprodref, client=wellaltus, env=nonprod, groupname=sshd, host=wellaltusnonprodref, instance=wellaltusnonprodref, job=integrations/process_exporter, type=ref} value=0 ]} {Instance:__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusprodref, client=wellaltus, env=prod, groupname=sshd, host=wellaltusprodref, instance=wellaltusprodref, job=integrations/process_exporter, type=ref State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusprodref, client=wellaltus, env=prod, groupname=sshd, host=wellaltusprodref, instance=wellaltusprodref, job=integrations/process_exporter, type=ref Value:0xc014445600} B:{Var:B Labels:__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusprodref, client=wellaltus, env=prod, groupname=sshd, host=wellaltusprodref, instance=wellaltusprodref, job=integrations/process_exporter, type=ref Value:0xc014445548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.155153493s EvaluationString:[ var='A' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusprodref, client=wellaltus, env=prod, groupname=sshd, host=wellaltusprodref, instance=wellaltusprodref, job=integrations/process_exporter, type=ref} value=1 ], [ var='B' labels={__name__=namedprocess_namegroup_num_procs, agent_hostname=wellaltusprodref, client=wellaltus, env=prod, groupname=sshd, host=wellaltusprodref, instance=wellaltusprodref, job=integrations/process_exporter, type=ref} value=0 ]}]" duration=13.111357ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlnxx41j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.157026905Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.157114776Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlnxx41j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156933044Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rlnxx41j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156923124Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=435206 slug=kkrprivateuat t=2024-05-29T13:44:14.15701507Z level=debug msg="Skip rule evaluation because it is paused"
+ logger=ngalert.state.manager.persist user=542900 slug=yuktarthtrehan t=2024-05-29T13:44:14.156906349Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.241658ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rli99mqw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156870384Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rli99mqw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156776993Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.156688923Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.156650166Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rle9m6yo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156677952Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rle9m6yo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156634471Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rl2jjnym-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15657148Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.15642458Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.156443577Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rl23v2bg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156387939Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rl19r7ze-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156197137Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rl16h9a4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156120296Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rl16h9a4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.156063255Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rl16h9a4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.155998755Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkzzude1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.155969634Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.155893559Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkrnlpkz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.155728552Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkrnlpkz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.155677571Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkqefadv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15557358Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.15555012Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.155482999Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkkqjdxt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.155293697Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.155643543Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.155483065Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.155406933Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkkqjdxt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.155190626Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.155195149Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.155158135Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkkfzqqb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.155107895Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.historian backend=loki user=332555 slug=omexomcs t=2024-05-29T13:44:14.155009313Z level=debug msg="Done saving alert state history batch"
+ level=debug ts=2024-05-29T13:44:14.154951396Z caller=remote_instance_store.go:51 user=171235 slug=circleslabs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=171235 slug=circleslabs t=2024-05-29T13:44:14.154902052Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.154887059Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.154864436Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkh48c0b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154878183Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkh48c0b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154852733Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkcz5d1c-termination-metadata-pv, phase=Released,
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154817032Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.154807254Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rkcz5d1c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154704361Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.154736654Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.154668338Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.154676319Z caller=remote_instance_store.go:51 user=473984 slug=driivz msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.154705439Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.154656025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.154648028Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.154708041Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.154605533Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:45:40Z next_ends_at=2024-05-29T13:46:10Z + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.154595347Z level=debug msg="Execution keep last state is Alerting" handler=resultAlerting + logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:14.154678876Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rk9hax0h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15462156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:14.154640441Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rk9hax0h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1545717Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=473984 slug=driivz t=2024-05-29T13:44:14.154610137Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.scheduler user=538037 slug=drivewealth version=24 fingerprint=f9c7f2d04e8004bb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.154483683Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=adennlvlmzi0wf, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.154249641s EvaluationString:}]" duration=59.856241ms + logger=ngalert.state.manager user=473984 slug=driivz t=2024-05-29T13:44:14.154475265Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.154458107Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rk96bitl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154380838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjytw1o3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154111445Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjytw1o3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154081885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjytw1o3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154071965Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjx05evt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154031634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjx05evt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.154022474Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjx05evt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153993684Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.154217977Z caller=remote_image_capturer.go:33 user=245291 slug=pismo rule_org_id=1 rule_uid=ocLdgd0Vk msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjx05evt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153955394Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.154124416Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.154153263Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjtkda95-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153839782Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjtkda95-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153828062Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.153827501Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager.persist user=260796 slug=expressvpn t=2024-05-29T13:44:14.153658017Z level=debug msg="Saving alert states" count=10 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjqonfom-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15361926Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjnsccqe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15358892Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjnsccqe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15356141Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.153539048Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.153509228Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - New Jersey - 1" t=2024-05-29T13:44:14.153455784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.15342104Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.15335795Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:14.153329639Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjjdbd5z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153304177Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.153217002Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rji3fgqa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153252996Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rji3fgqa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153147185Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rji3fgqa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153137935Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance= t=2024-05-29T13:44:14.152962535Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - Los Angeles - 1" t=2024-05-29T13:44:14.153077597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjcvuojf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153034984Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance= t=2024-05-29T13:44:14.152939312Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjcvuojf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.153013144Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance= t=2024-05-29T13:44:14.152923077Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjcvuojf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.152974534Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.scheduler user=312340 slug=lakefs version=5 fingerprint=c5375e869d1a0d4c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.152767822Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.152413458s EvaluationString:}]" duration=16.144351ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rjb7ym89-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.152787782Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.152585263Z caller=remote_instance_store.go:51 user=538355 slug=flogic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rj0dq7l8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.152564169Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.152527533Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.152517052Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=UK - London" t=2024-05-29T13:44:14.152517168Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=538355 slug=flogic t=2024-05-29T13:44:14.152537515Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=UK - London" t=2024-05-29T13:44:14.152500645Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538355 slug=flogic instance="__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" t=2024-05-29T13:44:14.152521596Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=538355 slug=flogic version=197 fingerprint=e97bf71e2d205e6d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.152375799Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=aws_ec2_cpucredit_balance_average, 
account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc01cfbfab8} F:{Var:F Labels:__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc01cfbfb98} G:{Var:G Labels:__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter Value:0xc01cfbfc78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151933288s EvaluationString:[ var='A' labels={__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=288 ], [ var='F' labels={__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=288 ], [ var='G' labels={__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-050fe154cdde01e8d, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-050fe154cdde01e8d, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter} value=0 ]}]" duration=10.178722ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-riyt35ap-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.152468558Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-riyt35ap-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.152257296Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=372011 slug=actianeu t=2024-05-29T13:44:14.152134173Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=18.765291ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=UK - Docklands" t=2024-05-29T13:44:14.152142641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-risvvpsf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.152118855Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-risvvpsf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.152084214Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.151961933Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=260796 slug=expressvpn version=357 fingerprint=f7d7cc35b098c2fb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.151716494Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster_name=UK - Docklands State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=UK - Docklands Value:0xc01638f9e0} B:{Var:B Labels:cluster_name=UK - Docklands Value:0xc01638f9e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.15125124s EvaluationString:[ var='A' labels={cluster_name=UK - Docklands} value=0.9409612142991212 ], [ var='B' labels={cluster_name=UK - Docklands} value=0 ]} {Instance:cluster_name=UK - East London State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=UK - East London Value:0xc01638fa38} B:{Var:B Labels:cluster_name=UK - East London Value:0xc01638fc40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151263503s EvaluationString:[ var='A' labels={cluster_name=UK - East London} value=0.9485651432979979 ], [ var='B' labels={cluster_name=UK - East London} value=0 ]} {Instance:cluster_name=UK - London State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=UK - London Value:0xc01638ff00} B:{Var:B Labels:cluster_name=UK - London Value:0xc01638ff08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151269144s EvaluationString:[ var='A' labels={cluster_name=UK - London} value=0.9532215031378954 ], [ var='B' labels={cluster_name=UK - London} value=0 ]} {Instance:cluster_name=USA - Atlanta State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=USA - Atlanta Value:0xc05c0c20b0} B:{Var:B Labels:cluster_name=USA - Atlanta Value:0xc05c0c2028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151274808s EvaluationString:[ var='A' labels={cluster_name=USA - Atlanta} value=0.9616511623188624 ], [ var='B' labels={cluster_name=USA - Atlanta} value=0 ]} {Instance:cluster_name=USA - Chicago State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=USA - Chicago Value:0xc05c0c2128} B:{Var:B Labels:cluster_name=USA - Chicago Value:0xc05c0c2120}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151279995s EvaluationString:[ var='A' labels={cluster_name=USA - Chicago} value=0.95700457710379 ], [ var='B' labels={cluster_name=USA - Chicago} value=0 ]} {Instance:cluster_name=USA - Dallas State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=USA - Dallas Value:0xc05c0c2220} B:{Var:B Labels:cluster_name=USA - Dallas Value:0xc05c0c21a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151285177s EvaluationString:[ var='A' labels={cluster_name=USA - Dallas} value=0.9633564620757713 ], [ var='B' labels={cluster_name=USA - Dallas} value=0 ]} {Instance:cluster_name=USA - Los Angeles - 1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=USA - Los Angeles - 1 Value:0xc05c0c2288} B:{Var:B Labels:cluster_name=USA - Los Angeles - 1 Value:0xc05c0c2280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151291134s EvaluationString:[ var='A' labels={cluster_name=USA - Los Angeles - 1} value=0.9548959155194442 ], [ var='B' labels={cluster_name=USA - Los Angeles - 1} value=0 ]} {Instance:cluster_name=USA - Miami State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=USA - Miami Value:0xc05c0c22d8} B:{Var:B Labels:cluster_name=USA - Miami Value:0xc05c0c2330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151296077s EvaluationString:[ var='A' labels={cluster_name=USA - Miami} value=0.9627697311408869 ], [ var='B' labels={cluster_name=USA - Miami} value=0 ]} {Instance:cluster_name=USA - New Jersey - 1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=USA - New Jersey - 1 Value:0xc05c0c2398} B:{Var:B Labels:cluster_name=USA - New Jersey - 1 Value:0xc05c0c2390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151300383s EvaluationString:[ var='A' labels={cluster_name=USA - New Jersey - 1} value=0.9560720983107333 ], [ var='B' labels={cluster_name=USA - New Jersey - 1} value=0 ]} {Instance:cluster_name=USA - New York State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster_name=USA - New York Value:0xc05c0c2438} B:{Var:B Labels:cluster_name=USA - New York Value:0xc05c0c2490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.151307339s EvaluationString:[ var='A' labels={cluster_name=USA - New York} value=0.9495370057935166 ], [ var='B' labels={cluster_name=USA - New York} value=0 ]}]" duration=2.05479664s + level=debug ts=2024-05-29T13:44:14.151886672Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.151864995Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.1517608Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.151679225Z caller=remote_alert_sender.go:94 user=332555 slug=omexomcs host=omexomcs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.85.163:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdkkg2os34zk0a alerts=1 + logger=ngalert.state.manager.persist user=87780 slug=zencloudandhosting t=2024-05-29T13:44:14.151742555Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=50.84105ms + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.151750934Z level=debug msg="Deleting alert states" count=1 + level=debug 
ts=2024-05-29T13:44:14.151777103Z caller=remote_instance_store.go:57 user=806229 slug=simplisafe msg="calling DeleteAlertInstances - not implemented" + logger=ngalert.state.manager user=806229 slug=simplisafe instance="host=writeproxy-02.ss-events.proxysql.us-east-1.prd.ss42.net" t=2024-05-29T13:44:14.151695753Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.151734951Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-riq1l9da-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15167728Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=806229 slug=simplisafe version=33 fingerprint=cb410e386aea0fff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.151532811Z level=debug msg="Alert rule evaluated" results="[{Instance:host=writeproxy-02.ss-events.proxysql.us-east-1.prd.ss42.net State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=writeproxy-02.ss-events.proxysql.us-east-1.prd.ss42.net Value:0xc029d12180} B:{Var:B Labels:host=writeproxy-02.ss-events.proxysql.us-east-1.prd.ss42.net Value:0xc029d12170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.150810214s EvaluationString:[ var='A' labels={host=writeproxy-02.ss-events.proxysql.us-east-1.prd.ss42.net} value=1 ], [ var='B' labels={host=writeproxy-02.ss-events.proxysql.us-east-1.prd.ss42.net} value=0 ]}]" duration=17.333189ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rioinubr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.151575629Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=332555 slug=omexomcs t=2024-05-29T13:44:14.151435565Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.510626ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rioinubr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.151524589Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rioinubr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.151503218Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.151403301Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rim0i97a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.151365307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rilcuufg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.151249126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.151307561Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.510253ms + level=debug ts=2024-05-29T13:44:14.151281411Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.151066921Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rig7z8fp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150924683Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.150911488Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-riej4zf0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150863812Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-riej4zf0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15070942Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.15087763Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-riej4zf0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.15066017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rie7t3zi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150609509Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rie7t3zi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150579119Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:14.150775432Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.000954ms + logger=ngalert.state.manager.persist user=460402 slug=dmind t=2024-05-29T13:44:14.150644447Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=460402 slug=dmind instance="datasource_uid=waLBmbH4z, ref_id=A" t=2024-05-29T13:44:14.150618352Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=460402 slug=dmind instance="datasource_uid=waLBmbH4z, ref_id=A" t=2024-05-29T13:44:14.150579092Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=460402 slug=dmind instance="datasource_uid=waLBmbH4z, ref_id=A" t=2024-05-29T13:44:14.150540407Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=277807 slug=info96f8 t=2024-05-29T13:44:14.150420354Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.873058ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ribjorqd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150487088Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=460402 slug=dmind instance="datasource_uid=waLBmbH4z, ref_id=A" t=2024-05-29T13:44:14.150516214Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-ribjorqd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150466928Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=460402 slug=dmind version=116 fingerprint=b5df750fbe399a05 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.150313575Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=waLBmbH4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.149937677s EvaluationString:}]" duration=27.220144ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhzw1deg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150247336Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhzw1deg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150220095Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.150255331Z caller=remote_instance_store.go:51 user=137307 slug=travelportal msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=137307 slug=travelportal instance="datasource_uid=grafanacloud-logs, ref_id=ERROR" t=2024-05-29T13:44:14.150184337Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=137307 slug=travelportal t=2024-05-29T13:44:14.150173065Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhyjodpg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150117914Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhyjodpg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150089834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-rhyjodpg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150053364Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhyjodpg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.150026353Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhsl351c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149803741Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:14.14992176Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.142483ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhsl351c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149794461Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhsl351c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149764461Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhsl351c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149753801Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhsf82pm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149580939Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=788474 
slug=elisasre instance="__name__=probe_success, cluster=sre-ci.k8s.local, component=luukku-preprod-system, instance=https://10.222.156.211, monitor=monitor-475, namespace=health, region=sdcv3, target=https://10.222.156.211" t=2024-05-29T13:44:14.149796633Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.149786832Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=788474 slug=elisasre version=6 fingerprint=2c2e2e201e159cba attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.14963852Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_success, cluster=sre-ci.k8s.local, component=luukku-preprod-system, instance=https://10.222.156.211, monitor=monitor-475, namespace=health, region=sdcv3, target=https://10.222.156.211 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, cluster=sre-ci.k8s.local, component=luukku-preprod-system, instance=https://10.222.156.211, monitor=monitor-475, namespace=health, region=sdcv3, target=https://10.222.156.211 Value:0xc0060b1328} B:{Var:B Labels:__name__=probe_success, cluster=sre-ci.k8s.local, component=luukku-preprod-system, instance=https://10.222.156.211, monitor=monitor-475, namespace=health, region=sdcv3, target=https://10.222.156.211 Value:0xc0060b14d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.149278514s EvaluationString:[ var='A' labels={__name__=probe_success, cluster=sre-ci.k8s.local, component=luukku-preprod-system, instance=https://10.222.156.211, monitor=monitor-475, namespace=health, region=sdcv3, target=https://10.222.156.211} value=1 ], [ var='B' labels={__name__=probe_success, cluster=sre-ci.k8s.local, component=luukku-preprod-system, instance=https://10.222.156.211, monitor=monitor-475, namespace=health, region=sdcv3, target=https://10.222.156.211} value=0 ]}]" duration=7.728616ms + level=debug ts=2024-05-29T13:44:14.149565168Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=447807 slug=dreamcorp t=2024-05-29T13:44:14.149547023Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=447807 slug=dreamcorp instance="item=aggregateBy(1m, avg), item_key=" t=2024-05-29T13:44:14.149445799Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhp6r76b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149339956Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhp6r76b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149310796Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=852841 
slug=agrivolt instance= t=2024-05-29T13:44:14.149312514Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=332031 slug=lexisnexisemailage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.149296439Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=852841 slug=agrivolt version=35 fingerprint=98363085da386a4e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.149172301Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc025177148} B:{Var:B Labels: Value:0xc025177150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.14880898s EvaluationString:[ var='A' labels={} value=0 ], [ var='B' labels={} value=0 ]}]" duration=33.103107ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhn781b7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149201085Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhn781b7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149171345Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhn781b7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.149113434Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.149114713Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.148936171Z caller=remote_instance_store.go:51 user=716319 slug=idealsplatformprodinfra msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.148972055Z caller=remote_instance_store.go:51 user=114286 slug=enverus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhi7cll1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148922192Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114286 slug=enverus instance="datasource_uid=lzFWNTdGk, ref_id=A" t=2024-05-29T13:44:14.148909739Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhi7cll1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148883962Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhi04qdl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148837041Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhi04qdl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14869159Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.148648967Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rhi04qdl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148673869Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.148588353Z caller=remote_instance_store.go:51 user=111839 slug=last9 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rh9hgit5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148433767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rh6d2eg2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148341416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-rh6d2eg2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148237965Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.14809572Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rh3v23ti-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148185084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rh3v23ti-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148164934Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.148122876Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.5417ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rh3v23ti-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.148061443Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.147910606Z caller=remote_instance_store.go:51 user=302415 slug=mgbcoreinfraprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rh3q2g3h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.147883701Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=302415 slug=mgbcoreinfraprod instance= t=2024-05-29T13:44:14.147817921Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgyax95v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14778307Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgyax95v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14774212Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.147760179Z caller=remote_instance_store.go:51 user=151289 slug=everflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgyax95v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.147675829Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=273717 slug=seventyfivef instance= t=2024-05-29T13:44:14.147595003Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + Error parsing panelUID for alert annotationruleID5300dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=273717 slug=seventyfivef version=24 fingerprint=31e51a31ae656a85 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.14747441Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels: Value:0xc0145122e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.147200271s EvaluationString:[ var='B0' metric='Value' labels={} value=1 ]}]" duration=92.393261ms + logger=ngalert.scheduler user=111839 slug=last9 version=148 fingerprint=efab2a33a3e7c3b0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.147281688Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.128.7:8482 Value:0xc005298f20} B1:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.129.130:8482 Value:0xc005299140} B10:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.148.188:8482 Value:0xc005298fe0} B11:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.149.126:8482 Value:0xc0052991e0} B12:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.132.49:8482 Value:0xc005299180} B13:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.136.85:8482 Value:0xc005299230} B14:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.137.185:8482 Value:0xc005298fc0} B15:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.137.1:8482 Value:0xc0052990c0} B16:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.142.67:8482 Value:0xc005298f60} B17:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.144.190:8482 Value:0xc005299260} B18:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.144.191:8482 Value:0xc005298f80} B19:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.145.83:8482 Value:0xc005298ef0} B2:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.130.201:8482 Value:0xc005299000} B20:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.146.81:8482 Value:0xc005299120} B21:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.147.38:8482 Value:0xc005298fa0} B22:{Var:B 
Labels:cluster=tsdb-use1.last9.io, instance=10.20.154.227:8482 Value:0xc005299020} B23:{Var:B Labels:cluster=tsdb-use1.last9.io, instance=10.20.156.82:8482 Value:0xc005299040} B3:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.131.125:8482 Value:0xc0052991c0} B4:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.131.224:8482 Value:0xc005299100} B5:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.132.217:8482 Value:0xc005299160} B6:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.134.243:8482 Value:0xc005298f40} B7:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.136.115:8482 Value:0xc0052991a0} B8:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.140.147:8482 Value:0xc005299060} B9:{Var:B Labels:cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.145.11:8482 Value:0xc0052990e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.146559008s EvaluationString:[ var='B0' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.128.7:8482} value=82.58361327557569 ], [ var='B1' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.129.130:8482} value=82.53412639790673 ], [ var='B2' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.130.201:8482} value=82.52715530991811 ], [ var='B3' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.131.125:8482} value=82.8858943812127 ], [ var='B4' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.131.224:8482} value=82.94389253308037 ], [ var='B5' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.132.217:8482} value=82.88514612166034 ], [ var='B6' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.134.243:8482} value=82.47935235531384 ], [ var='B7' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.136.115:8482} value=82.64740458367781 ], [ var='B8' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.140.147:8482} value=82.45987128334538 ], [ var='B9' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.145.11:8482} value=82.49335314533106 ], [ var='B10' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.148.188:8482} value=83.16930785509794 ], [ var='B11' metric='Value' labels={cluster=hotstar-aps1-tsdb.last9.io, instance=10.49.149.126:8482} value=82.76500948416867 ], [ var='B12' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.132.49:8482} value=88.23121740945376 ], [ var='B13' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.136.85:8482} value=89.31586102619283 ], [ var='B14' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.137.185:8482} value=87.96678323160091 ], [ var='B15' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.137.1:8482} value=88.33335781812525 ], [ var='B16' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.142.67:8482} value=90.94711224628512 ], [ var='B17' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.144.190:8482} value=89.71241155114443 ], [ var='B18' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.144.191:8482} value=90.89103042046855 ], [ var='B19' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.145.83:8482} value=90.44310445561061 ], [ var='B20' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.146.81:8482} value=90.11590584726106 ], [ 
var='B21' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.147.38:8482} value=90.49572375985302 ], [ var='B22' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.154.227:8482} value=88.05962592820754 ], [ var='B23' metric='Value' labels={cluster=tsdb-use1.last9.io, instance=10.20.156.82:8482} value=90.98177164371788 ]}]" duration=767.039415ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgx7lzp5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.147383456Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=174675 slug=journalprod t=2024-05-29T13:44:14.147373892Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=64.691134ms + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.14735286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgx7lzp5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.147340496Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgx7lzp5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.147309966Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.147063984Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgvsoiue-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.147118564Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.147101905Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgvsoiue-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.147054323Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgrtt3jn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146932142Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.146963029Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgrtt3jn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146877771Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.146931035Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgrfogtz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14673761Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:14.146759828Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:14.146804208Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgl6nbtz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146634969Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=937416 slug=cambridgeuniversitypress version=2 fingerprint=2a9c01f347a17910 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.146717877Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=hIjIxKP7k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.146449209s EvaluationString:}]" duration=1.694274ms + level=debug ts=2024-05-29T13:44:14.146713652Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:14.146604838Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.669211ms + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgl6nbtz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146604118Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgl6nbtz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146553338Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgl6nbtz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146531598Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgjo6sh8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146480187Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgjo6sh8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146431207Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgjo6sh8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146378856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.146371993Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgd7qcd8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146327195Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgd7qcd8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146298165Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rgd7qcd8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146220004Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.14615232Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfutiyzx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146188534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:14.14608245Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.146053594Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:14.146012156Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfutiyzx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.146026452Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners, Stage=--" t=2024-05-29T13:44:14.145940161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=2wfwm2xz0f, Method=--, Resource=/partners, Stage=--" t=2024-05-29T13:44:14.145929738Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:14.14589661Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.145798713Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfq6rqck-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14577192Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfi7ju4n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.145692359Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.145696257Z caller=remote_instance_store.go:51 user=542900 slug=yuktarthtrehan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.14550703Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfgv8tac-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.145373716Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.14530417Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfgv8tac-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.145296345Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.145190884Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.145228429Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.145164309Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=35.994812ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfeywhkt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.145011632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfedffqa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144941671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfedffqa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144930031Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfedffqa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144900811Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.145015169Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfedffqa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1448504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rfedffqa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14482713Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.144730509Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rf2nkt51-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144735759Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.144666332Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-rex3eone-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144598168Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rex3eone-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144588558Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=db-endusers-120230105143002016600000005" t=2024-05-29T13:44:14.144598091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-resehwaq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144297965Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-resehwaq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144268064Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rerz25oo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.144069282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.144075291Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:14.14404301Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=426229 slug=accelbyte version=14 fingerprint=45b99a1b73cc50bd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.143979627Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.143642103s EvaluationString:}]" duration=157.499272ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rep7f54x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.143646168Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rep7f54x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.143522187Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.143327818Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.143339247Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.143293199Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.095746ms + level=debug ts=2024-05-29T13:44:14.143229018Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-realzoqo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.143073182Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-realzoqo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.143018802Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-re0sh8sd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142927541Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-re0sh8sd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14287266Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.142874534Z 
caller=remote_alert_sender.go:94 user=250150 slug=bizagi host=bizagi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.107.6:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cefa2331-3218-463b-8200-4eacb0ff4f4e alerts=1 + level=debug ts=2024-05-29T13:44:14.142854224Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.142820988Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.142801745Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.142783744Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.933923ms + level=debug ts=2024-05-29T13:44:14.142816792Z caller=remote_instance_store.go:51 user=656459 slug=activeport msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=73a8dead4ccbe7dd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.142716587Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.142478137s EvaluationString:}]" duration=361.3453ms + logger=ngalert.state.manager user=656459 slug=activeport instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:14.142759094Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdxae1y4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142640378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdxae1y4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142630938Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.142575182Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdx4ksxn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142530886Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-rdx4ksxn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142501836Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.142494201Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.142468643Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdsq953b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142380005Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.142247237Z caller=remote_rule_evaluator.go:193 user=375798 slug=beeworks msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdsq953b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142272514Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.14220535Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdsnvz3f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.142106662Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.141991621Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdqzey5t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141990771Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.141900919Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.141854633Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-rdmstteq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14193839Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.141882474Z caller=remote_instance_store.go:57 user=112387 slug=lucidhq msg="calling DeleteAlertInstances - not implemented"
+ level=debug ts=2024-05-29T13:44:14.141859949Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.141827171Z level=info msg="Detected stale state entry" cacheID="[[\"Series\",\"query4292e6562e574e0baa6a9a07a119a671\"],[\"__alert_rule_namespace_uid__\",\"MLLy09GMk\"],[\"__alert_rule_uid__\",\"a794e0ee-1a26-423a-8c5a-f2bb4b7bcb10\"],[\"__contacts__\",\"\\\"DemandProjectAPI Slack\\\"\"],[\"alertname\",\"[Releng] demand-project-search OS cluster status RED alert\"],[\"grafana_folder\",\"Team-Marketplace\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdmstteq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.14187926Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdmstteq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141849879Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.141645748Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.141766595Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdmp3uij-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141688208Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rdmp3uij-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141662037Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rd81zu1g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141620867Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.141507495Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.141544715Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rd4i3szh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141380655Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rd4i3szh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141338314Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rd4i3szh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141272963Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.141111019Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rd2jnlm6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141240693Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rd2jnlm6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.141211403Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.141220193Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.14097312Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rcvqdpds-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140804919Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.140710598Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rcmclnjc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140710418Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.140685163Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.140668714Z caller=remote_alert_sender.go:94 user=537591 slug=btldevops host=btldevops-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.156.59.32:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=-oNibO14z alerts=1
+ level=debug ts=2024-05-29T13:44:14.140622003Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.140624408Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rclyn7s4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140568196Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rclyn7s4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140477545Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rclyn7s4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140454025Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rclc5lup-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140347104Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rc9iw9zk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140210312Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=265756 slug=vowfood t=2024-05-29T13:44:14.140115462Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rc9iw9zk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.140120681Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=265756 slug=vowfood instance= t=2024-05-29T13:44:14.140090232Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rc27qfyt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.13995413Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.140005521Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.139920857Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.139912571Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rc05ilkg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.139839929Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.139866874Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rbyccp7f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.139747988Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rbyccp7f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.139641017Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rbp1srjf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.139417464Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rbp1srjf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.139370684Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=277807 slug=info96f8 instance="environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending" t=2024-05-29T13:44:14.139467109Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rbp1srjf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.139310623Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=277807 slug=info96f8 version=1 fingerprint=f55af91821897a45 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.139115145Z level=debug msg="Alert rule evaluated" results="[{Instance:environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending Value:0xc039879d08} B:{Var:B Labels:environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending Value:0xc039879d88} C:{Var:C Labels:environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending Value:0xc039879dc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.13845995s EvaluationString:[ var='A' labels={environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending} value=59 ], [ var='B' labels={environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending} value=59 ], [ var='C' labels={environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento, status=pending} value=0 ]}]" duration=15.322753ms
+ level=debug ts=2024-05-29T13:44:14.139062425Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb8ml951-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.139065961Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb8ml951-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.13898422Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb8f4re8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138920139Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.138881509Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=332555 slug=omexomcs instance= t=2024-05-29T13:44:14.138873185Z level=debug msg="Changing state" previous_state=Alerting next_state=Normal previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:44:10Z
+ level=debug ts=2024-05-29T13:44:14.138815125Z caller=remote_instance_store.go:51 user=173374 slug=felmo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=467357 slug=peturs05 t=2024-05-29T13:44:14.138819174Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb8f4re8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138813798Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb8f4re8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138780508Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:14.138772374Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb5wlzqo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138688707Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:14.138757983Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=467357 slug=peturs05 version=8 fingerprint=af12729c4035da16 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.138622623Z level=debug msg="Alert rule evaluated" results="[{Instance:server=obvault State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:server=obvault Value:0xc01c4dc7b0} C:{Var:C Labels:server=obvault Value:0xc01c4dc7c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.138231823s EvaluationString:[ var='B' labels={server=obvault} value=21 ], [ var='C' labels={server=obvault} value=0 ]}]" duration=16.617392ms
+ level=debug ts=2024-05-29T13:44:14.138662474Z caller=remote_instance_store.go:51 user=890268 slug=cmcngipd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb5wlzqo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138598666Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb3uz274-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138561645Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb3uz274-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138530445Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb3uz274-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138480115Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb10ucbz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138452424Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rb10ucbz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138443064Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.138438569Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ray7vcol-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138212942Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.138052818Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-raslkuot-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.138120051Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.13808421Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.138045369Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-raslkuot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.13808499Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.137989996Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=855643 slug=datable instance="__name__=queue_jobs_count, customer_id=monitor, instance=172.17.0.30:9150, job=query, queue=ingress, status=waiting" t=2024-05-29T13:44:14.13805439Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=855643 slug=datable instance="__name__=queue_jobs_count, customer_id=monitor, instance=172.17.0.30:9150, job=query, queue=egress, status=waiting" t=2024-05-29T13:44:14.1380153Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rad50lap-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137938819Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-rad50lap-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137925529Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ra1kxmo3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137839288Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.137777997Z caller=remote_alert_sender.go:94 user=548157 slug=kushkiprod host=kushkiprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.209.249:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c5c622d1-2e0b-4d25-b1df-57fc608daba1 alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ra1kxmo3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137776897Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r9l95uef-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137677076Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r9l95uef-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137624656Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r9g6r34v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137581195Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r9g6r34v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137488284Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=299574 slug=caidev t=2024-05-29T13:44:14.137461748Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.137453277Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r9g6r34v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137454074Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=299574 slug=caidev instance="datasource_uid=fXpT1b14k, ref_id=A" t=2024-05-29T13:44:14.137436452Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.137399241Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r99l9jlf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137380103Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=299574 slug=caidev instance="datasource_uid=fXpT1b14k, ref_id=A" t=2024-05-29T13:44:14.137406895Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=299574 slug=caidev instance="datasource_uid=fXpT1b14k, ref_id=A" t=2024-05-29T13:44:14.13739436Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=299574 slug=caidev instance="datasource_uid=fXpT1b14k, ref_id=A" t=2024-05-29T13:44:14.137353588Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r99l9jlf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.137316853Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=299574 slug=caidev t=2024-05-29T13:44:14.137333637Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r96ar601-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136848548Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=142180 slug=luxtronic instance= t=2024-05-29T13:44:14.136998214Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=142180 slug=luxtronic version=33 fingerprint=f9bee86c3927d83b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.136890324Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.136579876s EvaluationString:}]" duration=340.979796ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r955mgwx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136780257Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.136784876Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.136762413Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.136759607Z caller=remote_rule_evaluator.go:193 user=312340 slug=lakefs msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r93ysbe1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136629126Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.136629661Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r93ysbe1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136619065Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r93ysbe1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136553395Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.136484173Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.136477524Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.136497783Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r93ysbe1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136521194Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8vs5vij-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136331952Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8vs5vij-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.136306192Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.13605006Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8tlgsc1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1360747Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8scmer3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135999229Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8p13t9u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135866218Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.135924089Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=633335 slug=promqlworkshop t=2024-05-29T13:44:14.135863903Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=633335 slug=promqlworkshop version=1 fingerprint=d85b27adca4e6cb3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.135799039Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.135457248s EvaluationString:}]" duration=12.025164ms
+ logger=ngalert.state.manager.persist user=549802 slug=neax t=2024-05-29T13:44:14.135765275Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.274088ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8p13t9u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135736506Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8d7536l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135624265Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.135619073Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r8d7536l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135577365Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.135552839Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.135588558Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.13547739Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r83f5c22-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135443123Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r80w1s7x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135342692Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r80w1s7x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135332752Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r80w1s7x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.135283062Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=460915 slug=funrise version=21 fingerprint=fce02545fdf64289 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.135170811Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=node-exporter-ev-tsdb-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:instance=node-exporter-ev-tsdb-1 Value:0xc0211bb910} C:{Var:C Labels:instance=node-exporter-ev-tsdb-1 Value:0xc0211bb920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.134883576s EvaluationString:[ var='B' labels={instance=node-exporter-ev-tsdb-1} value=98.30315872918963 ], [ var='C' labels={instance=node-exporter-ev-tsdb-1} value=0 ]}]" duration=15.580289ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r7ty444x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1350864Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.135022826Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.13503841Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.135015222Z caller=remote_instance_store.go:51 user=365838 slug=habicorp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.13492437Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r7t5xnh8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.134868477Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=365838 slug=habicorp version=11 fingerprint=76f8816e167676a0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.134706887Z level=debug msg="Alert rule evaluated" results="[{Instance:proxy=rds_general_mx_ro State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:proxy=rds_general_mx_ro Value:0xc00327d428} C:{Var:C Labels:proxy=rds_general_mx_ro Value:0xc00327d418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.134307768s EvaluationString:[ var='B' labels={proxy=rds_general_mx_ro} value=1 ], [ var='C' labels={proxy=rds_general_mx_ro} value=0 ]} {Instance:proxy=rds_general_mx_rw State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:proxy=rds_general_mx_rw Value:0xc00327d448} C:{Var:C Labels:proxy=rds_general_mx_rw Value:0xc00327d458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.134321673s EvaluationString:[ var='B' labels={proxy=rds_general_mx_rw} value=1 ], [ var='C' labels={proxy=rds_general_mx_rw} value=0 ]}]" duration=85.473968ms
+ logger=ngalert.state.manager user=548157 slug=kushkiprod instance= t=2024-05-29T13:44:14.134748306Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=548157 slug=kushkiprod t=2024-05-29T13:44:14.134710323Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.134486133Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.134442743Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=707420 slug=pangealab t=2024-05-29T13:44:14.134416245Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=6.750975ms
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.134431574Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.134358378Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.134402403Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r7erc3ce-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.134271131Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r7bc7wvz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.13413545Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:14.134101514Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.419084ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r7a6bs1t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.134073539Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r7a6bs1t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.134043229Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.134045767Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.133815237Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r73alsy8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.133873267Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r72218xp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.133762416Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=167212 slug=zypeinfra t=2024-05-29T13:44:14.133678492Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.72215ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r72218xp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.133603405Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r72218xp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.133581604Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=61472 slug=glasslewis t=2024-05-29T13:44:14.133529317Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=visual-regression-tracker-0" t=2024-05-29T13:44:14.133507213Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=61472 slug=glasslewis instance= t=2024-05-29T13:44:14.133507216Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=selenosis-dbf59b9d5-zzjpd" t=2024-05-29T13:44:14.133461872Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r710w59p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.133444523Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r710w59p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.133420493Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=61472 slug=glasslewis version=1 fingerprint=f37b9c68b5a7f3e0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.133408782Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.133105999s EvaluationString:}]" duration=31.509776ms
+ logger=ngalert.state.manager user=372011 slug=actianeu instance= t=2024-05-29T13:44:14.133353686Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=selenoid-ui-7bb849765f-v6bms" t=2024-05-29T13:44:14.133335809Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=372011 slug=actianeu t=2024-05-29T13:44:14.133311041Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=372011 slug=actianeu version=3 fingerprint=7556d8098246647a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.133248254Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.13299502s EvaluationString:}]" duration=153.314534ms
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-xggn5" t=2024-05-29T13:44:14.133233157Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-x9bvv" t=2024-05-29T13:44:14.133182116Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-x9bvv" t=2024-05-29T13:44:14.133173066Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-svdjb" t=2024-05-29T13:44:14.133077134Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-rp86s" t=2024-05-29T13:44:14.132964252Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6tawall-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.132865657Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-ncmps" t=2024-05-29T13:44:14.132888651Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-6sdsv" t=2024-05-29T13:44:14.132716226Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=s3-csi-node-6c5hg" t=2024-05-29T13:44:14.132635876Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=reportportal-ui-6b79449f49-gqplx" t=2024-05-29T13:44:14.132593635Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6sfgy9t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.132641295Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6sfgy9t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.132517873Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.132532432Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6jzotzi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.132347302Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=reportportal-jobs-558f4df568-kcdqh" t=2024-05-29T13:44:14.132409881Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6jzotzi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.132282811Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=reportportal-api-8dfdfd4c6-c24c8" t=2024-05-29T13:44:14.132287178Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=reportportal-api-8dfdfd4c6-c24c8" t=2024-05-29T13:44:14.132275928Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6j4rkzd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.132121899Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.1321407Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=reportportal-analyzer-train-0" t=2024-05-29T13:44:14.132221477Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.132128235Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.132041968Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6dij7va-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.132008808Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.132110308Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6dij7va-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131979198Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.131982967Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6dij7va-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131904987Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=overprovisioned-7c796db9c5-rp9vn" t=2024-05-29T13:44:14.131993433Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r68cyv2a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131843276Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r68cyv2a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131771236Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=491157 slug=prd01wr t=2024-05-29T13:44:14.131848838Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r68cyv2a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131730035Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r68cyv2a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131707415Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=0d11b1cfee2b674c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.131733851Z level=debug msg="Alert rule evaluated" results="[{Instance:DatabaseClass=db.r5.4xlarge State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DatabaseClass=db.r5.4xlarge Value:0xc0029b7d80} C:{Var:C Labels:DatabaseClass=db.r5.4xlarge Value:0xc0029b7d88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.131283404s EvaluationString:[ var='B' labels={DatabaseClass=db.r5.4xlarge} value=1.022783277869446 ], [ var='C' labels={DatabaseClass=db.r5.4xlarge} value=0 ]}]" duration=105.335182ms
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=overprovisioned-7c796db9c5-pmwbv" t=2024-05-29T13:44:14.13184365Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r6542hw9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131569224Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=overprovisioned-7c796db9c5-gmlr6" t=2024-05-29T13:44:14.131781378Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=overprovisioned-7c796db9c5-gmlr6" t=2024-05-29T13:44:14.131770348Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=overprovisioned-7c796db9c5-4hbgl" t=2024-05-29T13:44:14.131708857Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.131649184Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.131631616Z caller=remote_instance_store.go:51 user=618621 slug=sendamatic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=overprovisioned-7c796db9c5-4ggdn" t=2024-05-29T13:44:14.131564944Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.131556544Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=618621 slug=sendamatic t=2024-05-29T13:44:14.131535054Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.131415884Z
caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r64sg4sq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131401692Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=metrics-server-7b69fb95fc-94ck4" t=2024-05-29T13:44:14.131407581Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-zvvjt" t=2024-05-29T13:44:14.131359269Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.13130005Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r61cm58o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.13125093Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.131166343Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.703386ms + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-r6tbh" t=2024-05-29T13:44:14.131292318Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r60kzk2s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.131065418Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-qv6vb" t=2024-05-29T13:44:14.131223697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r60kzk2s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.130989618Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-qmj4z" t=2024-05-29T13:44:14.131164156Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r60kzk2s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.130949047Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-q8nh5" t=2024-05-29T13:44:14.131120075Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.131116639Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r5s8uz26-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.130831536Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-pm5jd" t=2024-05-29T13:44:14.131058624Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.131034809Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-pm5jd" t=2024-05-29T13:44:14.131045244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r5qsh2yl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.130713275Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-p7c8q" t=2024-05-29T13:44:14.130980882Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.13091756Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=114492 slug=railsbank version=2 fingerprint=ec7c7b783f47ad40 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.130835416Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=db-endusers-120230105143002016600000005 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=db-endusers-120230105143002016600000005 Value:0xc016c7a908} C:{Var:C Labels:DBInstanceIdentifier=db-endusers-120230105143002016600000005 Value:0xc016c7a910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.130526484s EvaluationString:[ var='B' labels={DBInstanceIdentifier=db-endusers-120230105143002016600000005} value=0.0008961664249999999 ], [ var='C' labels={DBInstanceIdentifier=db-endusers-120230105143002016600000005} value=0 ]}]" duration=352.1931ms + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-dzlsr" 
t=2024-05-29T13:44:14.13088339Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-dzlsr" t=2024-05-29T13:44:14.13086935Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=kube-proxy-9r2wx" t=2024-05-29T13:44:14.130813849Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ingress-nginx-controller-7764fdb5b8-7r96d" t=2024-05-29T13:44:14.130627735Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=fluent-bit-xmsjv" t=2024-05-29T13:44:14.130574244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=fluent-bit-w99tv" t=2024-05-29T13:44:14.130511903Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.130469691Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=fluent-bit-sdxqs" t=2024-05-29T13:44:14.130306829Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r5gk6u92-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.130287751Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=28776 slug=flotechnologies t=2024-05-29T13:44:14.13032057Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=28776 slug=flotechnologies instance= t=2024-05-29T13:44:14.13030427Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=28776 slug=flotechnologies t=2024-05-29T13:44:14.130241913Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=fluent-bit-pjn4l" t=2024-05-29T13:44:14.130174957Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID1512dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=28776 slug=flotechnologies version=21 fingerprint=c75447a85de00e14 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.130171241Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.129862618s EvaluationString:}]" duration=58.370514ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r5flzkov-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.130148129Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.130147829Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=567c694bc3ad60a9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.130016866Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.129808469s EvaluationString:}]" duration=123.784967ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r55surd0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129933747Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:14.129894183Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.253548ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4rdxv4t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129793965Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=fluent-bit-dmxqv" t=2024-05-29T13:44:14.129771048Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=fluent-bit-cr6ng" t=2024-05-29T13:44:14.129605275Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4ijscdm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129651004Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4dzbxsz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129409332Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4dzbxsz-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129355671Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-node-zt5cg" t=2024-05-29T13:44:14.129402061Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4dzbxsz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129324051Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-node-z9wmk" t=2024-05-29T13:44:14.129314009Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4dx4b81-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12925701Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4dx4b81-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129149479Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.129117889Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.129091474Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4dl38ld-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.129075248Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.129001822Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=514639 slug=karatech t=2024-05-29T13:44:14.129078134Z level=debug msg="Saving alert states" count=11 max_state_save_concurrency=1 + logger=ngalert.state.manager user=890268 slug=cmcngipd instance="service_name=cngiprdliveorder-history" t=2024-05-29T13:44:14.128915453Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:14.128929146Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=28.489313ms + level=debug ts=2024-05-29T13:44:14.128982311Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.128905027Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r4dl38ld-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.128889366Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.128900696Z caller=remote_instance_store.go:51 user=116479 slug=tomtomnv msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.128861088Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=60.279979ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r48at23i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.128820195Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=514639 slug=karatech instance="agent_hostname=vm-19, device=/dev/vda2, fstype=ext4, instance=vm19, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:14.128807975Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r48at23i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.128705994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=890268 slug=cmcngipd instance="service_name=cngiprdauthts-app" t=2024-05-29T13:44:14.128621272Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=514639 slug=karatech instance="agent_hostname=vm-17, device=/dev/vda2, fstype=ext4, instance=vm17, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:14.128701914Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-node-qpp8b" t=2024-05-29T13:44:14.128674927Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r47yc3l4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.128607003Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.128508995Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r47yc3l4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.128500432Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r42gn3s0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.128432232Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.128286919Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r41epbmh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.128147399Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=514639 slug=karatech instance="agent_hostname=vm-07, device=/dev/vda2, fstype=ext4, instance=vm07, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:14.128093921Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3x3g6n6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.128033287Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.127784845Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=514639 slug=karatech instance="agent_hostname=vm-03, device=/dev/vda2, fstype=ext4, instance=vm03, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:14.127816384Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=707420 slug=pangealab instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.127621629Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-r3sbv5ct-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.127814425Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3sbv5ct-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.127670714Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.127617868Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=707420 slug=pangealab version=1 fingerprint=d5c6552283c25c84 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.127529777Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.127341603s EvaluationString:}]" duration=7.682372ms + level=debug ts=2024-05-29T13:44:14.127537203Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3rrafar-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.127548202Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.12750887Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3r9jeg6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.127415701Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3r9jeg6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.127365231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3r9jeg6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12734381Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3ljm4ov-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12731383Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.12725094Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.127235062Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=182434 slug=form t=2024-05-29T13:44:14.127153977Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=65.046889ms + level=debug ts=2024-05-29T13:44:14.127171007Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3g6gkc5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.127023477Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.126988574Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.126885886Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.126852103Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3aar61e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.126745134Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.126856814Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Ukraine" t=2024-05-29T13:44:14.126789699Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, instance=prod-vsan2-esx4, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.126696418Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.126590685Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=487988 slug=microstrategyits 
instance="__name__=probe_success, instance=prod-vsan2-esx3, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.126643891Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, instance=prod-vsan2-esx3, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.126633145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, instance=prod-vsan1-esx1, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.126527203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=664100 slug=lson t=2024-05-29T13:44:14.126495716Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.206284ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - Santa Monica" t=2024-05-29T13:44:14.126529381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, instance=prod-sec-gfodir, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=tysons, type=physical" t=2024-05-29T13:44:14.126476711Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.126419054Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, instance=prod-sec-gdir, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=tysons, type=physical" t=2024-05-29T13:44:14.12642237Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.126345368Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=UK - Wembley" t=2024-05-29T13:44:14.12637526Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Taiwan - 3" t=2024-05-29T13:44:14.126246782Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Spain - Barcelona - 2" t=2024-05-29T13:44:14.126146702Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Spain - Barcelona" t=2024-05-29T13:44:14.126009504Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3693z7l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12635093Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits 
instance="__name__=probe_success, instance=prod-sec-garch, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=tysons, type=physical" t=2024-05-29T13:44:14.126363063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, instance=prod-sec-gam, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=tysons, type=physical" t=2024-05-29T13:44:14.126326802Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Portugal" t=2024-05-29T13:44:14.125629355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Portugal" t=2024-05-29T13:44:14.125613867Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Philippines" t=2024-05-29T13:44:14.125441061Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-node-ks75v" t=2024-05-29T13:44:14.126315379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Lebanon" t=2024-05-29T13:44:14.125241474Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-node-ks75v" t=2024-05-29T13:44:14.126299058Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3693z7l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.126242259Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r3693z7l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.126200809Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-node-d7dfr" t=2024-05-29T13:44:14.126114615Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.126063555Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx7, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.126048051Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx7, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, 
source=cyrusone, type=physical" t=2024-05-29T13:44:14.126036301Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-node-blzrr" t=2024-05-29T13:44:14.126026443Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx6, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.125987953Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-controller-ffd7fb8f5-w4cx8" t=2024-05-29T13:44:14.125934142Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2wnbfim-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125844485Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.125888209Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=ebs-csi-controller-ffd7fb8f5-4zbtn" t=2024-05-29T13:44:14.125896511Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=dependabot-dependabot-gitlab-worker-66f64c68b7-xr9hj" t=2024-05-29T13:44:14.125740648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2wnbfim-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125698853Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx48, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.125744188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=845543 slug=deliveryhero t=2024-05-29T13:44:14.125728294Z level=debug msg="Saving alert states" count=4 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2vzd2rr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125664293Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2vzd2rr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125583262Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=dependabot-dependabot-gitlab-web-5466d89bf8-pxk8l" t=2024-05-29T13:44:14.125611225Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2vzd2rr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125543942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=851228 slug=brenan t=2024-05-29T13:44:14.125516531Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.794572ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2vzd2rr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125516171Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx44, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.125541224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx44, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.125526922Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2tir8bz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125446051Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx43, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.125493656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx42, 
job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.125432163Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=aws-node-w2rft" t=2024-05-29T13:44:14.125445832Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2tir8bz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12540034Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.125350081Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=aws-node-sktv8" t=2024-05-29T13:44:14.12536503Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=aws-node-sktv8" t=2024-05-29T13:44:14.12535661Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.125160563Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2ftga0x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125264559Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=aws-node-8nsfg" t=2024-05-29T13:44:14.125263018Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716319 slug=idealsplatformprodinfra instance="pod=aws-node-4nw4s" t=2024-05-29T13:44:14.125114276Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2ftga0x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125194668Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r2ftga0x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.125102097Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.125052513Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Japan - Yokohama" t=2024-05-29T13:44:14.125066368Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx36, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.125045456Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx35, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124998338Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r29qhlxj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124918605Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx34, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124951467Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r29k0smv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124810564Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.124791854Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.124795113Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx22, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124802305Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=prod-v2, Consumer Group=ledger-fe-consumer-v1, Topic=treasury.financial-events.v1" t=2024-05-29T13:44:14.124781459Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r29k0smv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124770894Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=prod-v2, Consumer Group=ledger-fe-consumer-v1, Topic=treasury.financial-events.v1" t=2024-05-29T13:44:14.124763341Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx19, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124646951Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx19, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124641476Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=2acd579bd13c82fe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.124531595Z level=debug msg="Alert rule evaluated" results="[{Instance:Cluster Name=prod-v2, Consumer Group=ledger-fe-consumer-v1, Topic=treasury.financial-events.v1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=prod-v2, Consumer Group=ledger-fe-consumer-v1, Topic=treasury.financial-events.v1 Value:0xc078203120} C:{Var:C Labels:Cluster Name=prod-v2, Consumer Group=ledger-fe-consumer-v1, Topic=treasury.financial-events.v1 Value:0xc078203158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.124139613s EvaluationString:[ var='B' labels={Cluster Name=prod-v2, Consumer Group=ledger-fe-consumer-v1, Topic=treasury.financial-events.v1} value=1 ], [ var='C' labels={Cluster Name=prod-v2, Consumer Group=ledger-fe-consumer-v1, Topic=treasury.financial-events.v1} value=0 ]}]" duration=55.243449ms
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx16, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124559668Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx16, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.12454532Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r22it1bd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124494511Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx15, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124512376Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx14, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124466278Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx14, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124452758Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx13, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124393496Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1sg08gz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124330589Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx12, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124363404Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx11, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124302417Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-vsan-esx10, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.12425021Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Greece" t=2024-05-29T13:44:14.124250061Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1sg08gz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124288759Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1ou6znn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124215388Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=tech-mac-esx2, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.124144917Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1ou6znn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.124096927Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Ghana" t=2024-05-29T13:44:14.124101858Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Ghana" t=2024-05-29T13:44:14.124089617Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1ou6znn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123950375Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=qel-aix-lpar2, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123907543Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=qel-aix-lpar2, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123889098Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=France - Strasbourg" t=2024-05-29T13:44:14.123953654Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=France - Strasbourg" t=2024-05-29T13:44:14.123938221Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1lx8n88-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123779203Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=ocean, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123857574Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=ocean, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123843135Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=milkway, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123794033Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=esxi-dc6-tech01, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123700805Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=adcbj04, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123599659Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.123521995Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.123556878Z caller=client.go:80 msg="creating client for grafana instance" user=513750 addr=dns:///prorailctu-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1cmlapq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123528161Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.123503162Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=adcbe18, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123540461Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Czech Republic" t=2024-05-29T13:44:14.12354303Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1cmlapq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123506601Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=adcba25, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123489743Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.123533288Z caller=remote_instance_store.go:51 user=549802 slug=neax msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.123474561Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1cmlapq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12346498Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:14.123440977Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=559766 slug=onto
+ logger=ngalert.state.manager user=549802 slug=neax instance="__name__=grafanacloud_org_traces_usage, cluster=prod-us-central-0, org_id=806195" t=2024-05-29T13:44:14.123472587Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=549802 slug=neax instance="__name__=grafanacloud_org_traces_usage, cluster=prod-us-central-0, org_id=806195" t=2024-05-29T13:44:14.123458887Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.123440697Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=549802 slug=neax t=2024-05-29T13:44:14.123415486Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=adcaw14, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123433424Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.123315182Z caller=remote_instance_store.go:51 user=707575 slug=prod1themomproject msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=adcau11, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123385494Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Colombia" t=2024-05-29T13:44:14.123357565Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1bvvthh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123346129Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=tech, instance=adcae20, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123355254Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:14.123274275Z caller=client.go:80 msg="creating client for grafana instance" user=554589 addr=dns:///prodio-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1bvvthh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123291398Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1bvvthh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123268438Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=707575 slug=prod1themomproject t=2024-05-29T13:44:14.123255929Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=ts-esx-db2, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123223554Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=ts-esx-db1, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123147234Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r1bvvthh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123212308Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=707575 slug=prod1themomproject instance="datasource_uid=grafanacloud-prom, ref_id=query" t=2024-05-29T13:44:14.123169505Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.123204772Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.12311128Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:14.123093564Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=7914935263273d9d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.123044979Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=7.979972ms
+ level=warn ts=2024-05-29T13:44:14.123078973Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=649973 slug=opsstack
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r15ez9xq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123111917Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r15ez9xq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.123055996Z level=debug msg="Keeping state" state=Normal
+ level=error ts=2024-05-29T13:44:14.122968031Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Brazil - 2" t=2024-05-29T13:44:14.123076449Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Brazil - 2" t=2024-05-29T13:44:14.123062448Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.122994367Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=tech-vsan-esx31, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123047472Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=tech-vsan-esx31, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.123033624Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.122981581Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0x777jv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.122899384Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=tech-vsan-esx29, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.1229465Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=537591 slug=btldevops instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.122887158Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:14.122890246Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.122831781Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:14.122758576Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.122763698Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=tech-vsan-esx25, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.122727948Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=tech-vsan-esx25, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.122702105Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=supp, instance=tech-vsan-esx24, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, os=esx, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.122668333Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=f9fd03cdda81340d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.122672642Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.122390893s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=160.096947ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Australia - Perth" t=2024-05-29T13:44:14.122647038Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0mw2jvg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12250822Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0mw2jvg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12246911Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0mw2jvg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12244063Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits instance="__name__=probe_success, env=corp, instance=corp-dc4-was, job=icmp_check, office=hq, origin_prometheus=prod-corp-prom, region=na, role=prod, source=cyrusone, type=physical" t=2024-05-29T13:44:14.122100148Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=487988 slug=microstrategyits t=2024-05-29T13:44:14.122016517Z level=debug msg="State manager processing evaluation results" resultCount=90
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Argentina" t=2024-05-29T13:44:14.122218304Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0jatecn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.122238148Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.122036473Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0jatecn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.122175457Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0hau86y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.122114986Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:14.122113464Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=557153 slug=personanongrata
+ level=debug ts=2024-05-29T13:44:14.122085864Z caller=ruler.go:522 msg="tenant is owned by this instance" user=557153 slug=personanongrata groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0hau86y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.122026245Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0hau86y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.122006875Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0dulv34-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121942124Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.121859364Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0dulv34-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121815453Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0bvsvmb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121716072Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r0bvsvmb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121640051Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=411780 slug=hyperprateek t=2024-05-29T13:44:14.121547028Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.993949ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r089rrch-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12150549Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r089rrch-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121424859Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r089rrch-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121380659Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r089rrch-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121344908Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r00085cq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121304148Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r00085cq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121272778Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.121066499Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-r00085cq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.121093246Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.121050862Z caller=remote_instance_store.go:51 user=167212 slug=zypeinfra msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:14.120956753Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=538026 slug=ontix
+ logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:14.120827779Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.120819757Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=548157 slug=kushkiprod instance="datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A" t=2024-05-29T13:44:14.120795428Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.120646303Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.120785487Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qzpq8kk9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.120647601Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qzpq8kk9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.120638521Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qzpo31xy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.120599021Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qzpo31xy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.12052283Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.120337007Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qzec49zk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.120240227Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qzec49zk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.120208197Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.120379515Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.120231738Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.120115245Z caller=client.go:80 msg="creating client for grafana instance" user=762021 addr=dns:///postenbring-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:14.120161846Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=717160 slug=oriondevtech
+ level=warn ts=2024-05-29T13:44:14.120091945Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=677369 slug=nativetech
+ level=debug ts=2024-05-29T13:44:14.120073545Z caller=ruler.go:522 msg="tenant is owned by this instance" user=677369 slug=nativetech groups=0
+ level=info component=discovery ts=2024-05-29T13:44:14.119856743Z caller=client.go:80 msg="creating client for grafana instance" user=554721 addr=dns:///pordtis-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qz05t3pa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119785182Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:14.119730242Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=525873 slug=oeyssund
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qz05t3pa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119725232Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qz05t3pa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119687751Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qypesxnn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119435569Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyotf8ep-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119268627Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyotf8ep-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119235997Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyo5mxkv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119138526Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyo5mxkv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119116766Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyo5mxkv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119032455Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.118992335Z caller=ruler.go:522 msg="tenant is owned by this instance" user=554585 slug=openfleet groups=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyo5mxkv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.119010734Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyjzxkc0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118929854Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyjzxkc0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118887523Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyjzxkc0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118861183Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyjfjdfu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118701791Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.118664032Z caller=ruler.go:522 msg="tenant is owned by this instance" user=672408 slug=orionprod groups=3
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyhwj2a8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118632041Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyhwj2a8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.11858013Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qyaw6907-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118484489Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.118287228Z caller=ruler.go:522 msg="tenant is owned by this instance" user=538679 slug=pavellad groups=0
+ level=warn ts=2024-05-29T13:44:14.118314728Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=693950 slug=persianjapi
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy8uh19v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118341778Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy8uh19v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118312407Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy8uh19v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118229666Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy6cg4do-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118189136Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy6cg4do-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118160386Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy6cg4do-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118088695Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy6cg4do-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.118066255Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.118021552Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.117881298Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.575001ms
+ level=debug ts=2024-05-29T13:44:14.117922021Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qy3tsiwz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117937883Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.117766235Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datacenter=us-west-2" t=2024-05-29T13:44:14.117920486Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=426229 slug=accelbyte instance="datacenter=us-east-1" t=2024-05-29T13:44:14.117877092Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=196413 slug=form3production instance="Region=eu-west-2, ServiceLimit=Subnets per subnet group, ServiceName=RDS" t=2024-05-29T13:44:14.117687856Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxz245yl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117862253Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxz245yl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117833952Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=196413 slug=form3production instance="Region=eu-west-2, ServiceLimit=Subnets per subnet group, ServiceName=RDS" t=2024-05-29T13:44:14.117671432Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxz245yl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117823842Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxz245yl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117794422Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.117769324Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.117714903Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxxgp350-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1176147Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:14.117560327Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:14.117550229Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.117443237Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.11743622Z caller=ruler.go:522 msg="tenant is owned by this instance" user=551798 slug=opened groups=0
+ level=debug ts=2024-05-29T13:44:14.117428147Z caller=remote_instance_store.go:51 user=602963 slug=provectusalgae msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=602963 slug=provectusalgae instance= t=2024-05-29T13:44:14.117348523Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxgwayh4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117404678Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxgwayh4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117394988Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxgwayh4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117357467Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.117369497Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=602963 slug=provectusalgae t=2024-05-29T13:44:14.11726686Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxgwayh4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117291667Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxbuiu4f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.117001954Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxbuiu4f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.116987304Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.117060416Z caller=client.go:80 msg="creating client for grafana instance" user=754968 addr=dns:///pnl0sp42-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:14.117008616Z caller=ruler.go:522 msg="tenant is owned by this instance" user=502125 slug=optimisie groups=0
+ level=warn ts=2024-05-29T13:44:14.116979616Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=535760 slug=palovskyondrej
+ level=debug ts=2024-05-29T13:44:14.116770739Z caller=remote_instance_store.go:51 user=531208
slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qxbkf7fb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.116816422Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.116554412Z caller=ruler.go:522 msg="tenant is owned by this instance" user=502501 slug=nwn groups=0 + level=debug ts=2024-05-29T13:44:14.116708443Z caller=remote_instance_store.go:51 user=328755 slug=infogrideu msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.11666044Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.116683139Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qx7jnn66-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.11658743Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.116444411Z caller=ruler.go:522 msg="tenant is owned by this instance" user=502539 slug=pereest groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwxvp82x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.116475968Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwxvp82x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.116397828Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.115859305Z caller=ruler.go:522 msg="tenant is owned by this instance" user=516096 slug=michalbern1 groups=0 + level=info ts=2024-05-29T13:44:14.116270338Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.74.54:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddlo4rlyrumf4b alerts=1 + logger=ngalert.state.manager.persist user=284646 slug=habitap t=2024-05-29T13:44:14.116272125Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.116304807Z caller=remote_instance_store.go:51 user=284646 slug=habitap msg="calling SaveAlertInstance" + logger=ngalert.state.manager 
user=284646 slug=habitap instance="__name__=up, agent_hostname=duet, instance=duet:12345, job=integrations/windows_exporter" t=2024-05-29T13:44:14.116260803Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=284646 slug=habitap instance="__name__=up, agent_hostname=duet, instance=duet:12345, job=integrations/windows_exporter" t=2024-05-29T13:44:14.116251146Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwvhc1vi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.116269386Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwvhc1vi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.116113995Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwmtll50-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115848822Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.115836305Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=513835 slug=paulactin + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwf5x6n5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115800122Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwf5x6n5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115787111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwebtli5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.115560439Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwebtli5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115544599Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwebtli5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115506339Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwcynej4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115451248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qwcynej4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115438208Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.115633254Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.115668174Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.115474273Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:14.115330047Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.191747ms + logger=ngalert.state.manager.persist user=127400 slug=fifthdomain t=2024-05-29T13:44:14.115182907Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.258842ms + level=debug ts=2024-05-29T13:44:14.115194899Z caller=ruler.go:522 msg="tenant is owned by this instance" user=634891 slug=mgbmcapeng groups=3 + level=debug ts=2024-05-29T13:44:14.11521281Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.11514288Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.797388ms + level=info ts=2024-05-29T13:44:14.11511231Z caller=remote_alert_sender.go:94 user=18335 slug=semaphore 
host=semaphore-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.67.109:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=d90c69c8-dee1-4b97-828d-af91f4a60952 alerts=1 + logger=ngalert.state.manager.persist user=134245 slug=zume t=2024-05-29T13:44:14.115068964Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.215121ms + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.115039469Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.115025866Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.115079078Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.115010686Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qw9bpw7x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115048334Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qw9bpw7x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.115012413Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.114998737Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qw801dvi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114900562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qw801dvi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114739191Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.114870907Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qw801dvi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1146932Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.11472534Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=309009 slug=elestyle t=2024-05-29T13:44:14.114631897Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvy5ddww-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114581809Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvy5ddww-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114520398Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.114490472Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvx3gtgd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114452348Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvx3gtgd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114433138Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.11427949Z caller=ruler.go:522 msg="tenant is owned by this instance" user=537600 slug=oneytrustpoc groups=1 + level=debug ts=2024-05-29T13:44:14.114278523Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.114160989Z caller=ruler.go:522 msg="tenant is owned by this instance" user=674819 slug=opsbrick groups=1 + level=debug ts=2024-05-29T13:44:14.114203363Z caller=remote_instance_store.go:51 user=248027 slug=mishp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-qvv0qc9g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114153115Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=248027 slug=mishp instance= t=2024-05-29T13:44:14.114157749Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.114071508Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=248027 slug=mishp t=2024-05-29T13:44:14.114123669Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=1" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvtq6rmt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114060494Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=248027 slug=mishp t=2024-05-29T13:44:14.114029583Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=248027 slug=mishp version=123 fingerprint=a30ad5e9107a6f19 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.113971248Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.113763604s EvaluationString:}]" duration=20.141169ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvtq6rmt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.114006713Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=87052 slug=polystream t=2024-05-29T13:44:14.113886676Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.238099ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvtmyi4g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113850662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvtmyi4g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113832891Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvtmyi4g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113787411Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvtmyi4g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113769641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvt6k9mm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.11372687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvmsnere-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113494588Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvmsnere-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113479458Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=716527 slug=newpigqa t=2024-05-29T13:44:14.113405376Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.273193ms + level=info component=discovery ts=2024-05-29T13:44:14.11322398Z caller=client.go:80 msg="creating client for grafana instance" user=647068 addr=dns:///pgolosov88-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:14.11319788Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=690492 slug=ognl + level=debug ts=2024-05-29T13:44:14.11316758Z caller=ruler.go:522 msg="tenant is owned by this instance" user=690492 slug=ognl groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-qvkzzjlx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113131684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvkzzjlx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113113944Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.113010413Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvkzzjlx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.113035683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qviqhmzq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112957502Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.113066719Z caller=remote_instance_store.go:51 user=608968 slug=safhironyx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=608968 slug=safhironyx instance= t=2024-05-29T13:44:14.112980217Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.112976449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=608968 slug=safhironyx t=2024-05-29T13:44:14.112938916Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.112805019Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qviqhmzq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112843461Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qviqhmzq-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112820581Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvh2iafw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.11267781Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.112583717Z caller=remote_instance_store.go:51 user=512398 slug=brightdigital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvh2iafw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112606349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvcy8ocp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112556018Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.112441125Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvctqtge-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112419897Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qvctqtge-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112288516Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.112237751Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.112096827Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-qv1q3oir-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112169794Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qv1ehth8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.112014983Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.11195155Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qv1ehth8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.111956902Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=890269 slug=cmjjilnp instance="instance=suuscn1cjillwcadbs1001" t=2024-05-29T13:44:14.111861908Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=890269 slug=cmjjilnp t=2024-05-29T13:44:14.11173101Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-quvceo33-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.111643129Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.111438206Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.111398777Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.111355972Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.111467381Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=543660 slug=jobcloudprogrammaticstage version=1 fingerprint=84c09b0182fe81d9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.111294236Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.110832861s EvaluationString:}]" duration=10.620865ms + level=debug ts=2024-05-29T13:44:14.111438846Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.111425501Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.111415999Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.111257662Z caller=ruler.go:522 msg="tenant is owned by this instance" user=629323 slug=nk99 groups=0 + level=debug ts=2024-05-29T13:44:14.111242195Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.111226926Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-quv6oirl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.111263965Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.111173299Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.111127268Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qurm1zv1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.111030003Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qurm1zv1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.110960432Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qul1g2mz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.11080646Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=851228 slug=brenan t=2024-05-29T13:44:14.110714229Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=851228 slug=brenan instance="__name__=temperature, id=bdj9mc4j139j4a, name=temperature" t=2024-05-29T13:44:14.110692679Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.110613625Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=warn 
ts=2024-05-29T13:44:14.110537055Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=552926 slug=ofh + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-quiee01p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.110494567Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=327842 slug=exabeam version=363 fingerprint=a4f9505896d0d529 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.110520866Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.110280205s EvaluationString:}]" duration=66.217765ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-quiee01p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.110456067Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-quck3zg9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.110225204Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=664100 slug=lson instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.110197486Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=664100 slug=lson instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.110182997Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:14.110135651Z caller=client.go:80 msg="creating client for grafana instance" user=557153 addr=dns:///personanongrata-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.110153865Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.110144231Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-quck3zg9-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.110093803Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qubx94zg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109700029Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.109654227Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:14.109618646Z caller=client.go:80 msg="creating client for grafana instance" user=502539 addr=dns:///pereest-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.109527485Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.109543337Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qu9pbm4y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109382376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qu9n5qnr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109335395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qu9n5qnr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109305645Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qu9n5qnr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109260634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qu9n5qnr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109153193Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvwe46k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109108313Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvwe46k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109080233Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.109293339Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.109232742Z caller=client.go:80 msg="creating client for grafana instance" user=623958 addr=dns:///penbevdev-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvwe46k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.109002572Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvwe46k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108967561Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:14.109300166Z level=debug msg="Saving alert states done" count=84 max_state_save_concurrency=1 duration=1.777363742s
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvtc7t7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108880541Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvtc7t7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.1088409Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvtc7t7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.10881485Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtvtc7t7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108747389Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtuvpzff-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108709509Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtuvpzff-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108680049Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=dd46d7498d78034a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.109079095Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.108693533s EvaluationString:}]" duration=17.96292ms
+ logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.109087698Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.108981461Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.10899174Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.108948095Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.108557436Z caller=ruler.go:522 msg="tenant is owned by this instance" user=524372 slug=mysh groups=0
+ logger=ngalert.state.manager user=427871 slug=fasax t=2024-05-29T13:44:14.108581795Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=427871 slug=fasax version=2 fingerprint=b9f40eee756ba85a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.108442662Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.108061611s EvaluationString:}]" duration=48.601108ms
+ logger=ngalert.state.manager user=411780 slug=hyperprateek instance= t=2024-05-29T13:44:14.108517566Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=411780 slug=hyperprateek t=2024-05-29T13:44:14.108388802Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtihnu43-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108421706Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=411780 slug=hyperprateek version=15 fingerprint=a89a09b6773c7d35 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.108313011Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.107951575s EvaluationString:}]" duration=12.740026ms
+ level=debug ts=2024-05-29T13:44:14.108364562Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.108348824Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.108347754Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.108271754Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.108276833Z caller=client.go:80 msg="creating client for grafana instance" user=513835 addr=dns:///paulactin-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.scheduler user=806229 slug=simplisafe version=33 fingerprint=9a42a36c1eea5ff7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.108216591Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.107795987s EvaluationString:}]" duration=21.592254ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtfpfwem-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108171883Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.108140232Z caller=client.go:80 msg="creating client for grafana instance" user=616334 addr=dns:///pastopnik-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtfpfwem-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108118263Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.108078831Z caller=ruler.go:522 msg="tenant is owned by this instance" user=605791 slug=noreo18 groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtfpfwem-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108104863Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtfpfwem-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.108070972Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtfjbzw4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107939941Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.10787943Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.107865538Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtcldp1r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107794529Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qtcldp1r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107675898Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=656284 slug=cencosudx t=2024-05-29T13:44:14.107544383Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=59.945442ms
+ level=debug ts=2024-05-29T13:44:14.107423952Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qt4bm8m6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107334965Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=465668 slug=xpressinfra t=2024-05-29T13:44:14.107286177Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=info component=discovery ts=2024-05-29T13:44:14.107275924Z caller=client.go:80 msg="creating client for grafana instance" user=693577 addr=dns:///paletas-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:14.107211541Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qt4bm8m6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107270044Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.107245724Z caller=client.go:80 msg="creating client for grafana instance" user=704648 addr=dns:///pacx87x-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:14.107251924Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=544998 slug=msalehiomron
+ logger=ngalert.state.manager user=465668 slug=xpressinfra instance= t=2024-05-29T13:44:14.107244516Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.107229723Z caller=ruler.go:522 msg="tenant is owned by this instance" user=544998 slug=msalehiomron groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qsj27dtm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107239034Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qsj27dtm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107216644Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qsj27dtm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107163653Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qsj27dtm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107133543Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qsj27dtm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.107106602Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.107118722Z caller=client.go:80 msg="creating client for grafana instance" user=513737 addr=dns:///oxptech-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:14.107029122Z caller=ruler.go:522 msg="tenant is owned by this instance" user=623909 slug=nenstest groups=0
+ level=debug ts=2024-05-29T13:44:14.106987455Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qshganb6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.106961501Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.106658818Z caller=client.go:80 msg="creating client for grafana instance" user=672408 addr=dns:///orionprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:14.106576098Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.564222ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qscgmuiw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.106572287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qsbgurx4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.106255444Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.106196764Z caller=remote_instance_store.go:51 user=716527 slug=newpigqa msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:14.106108513Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=524703 slug=noddies
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qs20z54v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.106133682Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qs20z54v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.106098442Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qs20z54v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.10595404Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.105883421Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qs15ln7h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.10587588Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qs15ln7h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.105824259Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=444725 slug=devnextgen t=2024-05-29T13:44:14.105617774Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=444725 slug=devnextgen instance="__name__=probe_success, config_version=1707215594237024768, instance=https://promotion-signalr-eng-pub.calmsky-f6350dc4.australiaeast.azurecontainerapps.io, job=promotion, probe=dev_private_probe" t=2024-05-29T13:44:14.105593395Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.10548847Z caller=grafana.go:247 user=251869 slug=roseburg msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=a2a5f84c-51ee-4a86-9c5d-8c3dca4ced3e" groups=0 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qs0uucuo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.105494146Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.105311446Z caller=grafana.go:247 user=251869 slug=roseburg msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=a2a5f84c-51ee-4a86-9c5d-8c3dca4ced3e" groups=0 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qrvwl3bv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.105277493Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.10466821Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qrlnst9x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.105205613Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qrlnst9x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.105160362Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.104993223Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qrhzsonn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.10490273Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.104670672Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qrhzsonn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104834899Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.104666311Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=196413 slug=form3production t=2024-05-29T13:44:14.104643458Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=161.422442ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qre4u1jw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104548756Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:14.104426397Z caller=client.go:80 msg="creating client for grafana instance" user=674819 addr=dns:///opsbrick-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qre4u1jw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104519396Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qre4u1jw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104510156Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qre4u1jw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104478245Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qre4u1jw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104448975Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.104367196Z caller=ruler.go:522 msg="tenant is owned by this instance" user=673435 slug=nnowag groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qr3abx8t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104225083Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qr3abx8t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104215142Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qr3abx8t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104160442Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqwc73ko-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104081521Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqwc73ko-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.104047441Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqwc73ko-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.10401768Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqwc73ko-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.10397398Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqvjtljc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.103887809Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=245291 slug=pismo version=76 fingerprint=9d0310b25d12cf07 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.10408826Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.103813316s EvaluationString:}]" duration=38.377626ms
+ level=info component=discovery ts=2024-05-29T13:44:14.104006593Z caller=client.go:80 msg="creating client for grafana instance" user=551798 addr=dns:///opened-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:14.103843791Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=534031 slug=neoncube
+ level=debug ts=2024-05-29T13:44:14.103747691Z caller=ruler.go:522 msg="tenant is owned by this instance" user=502147 slug=mmic groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqvjtljc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.103699407Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.103581244Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.591487ms
+ level=debug ts=2024-05-29T13:44:14.103460158Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqvgxq7i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.103515205Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.103527013Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqqarymy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.103323923Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqqarymy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.103309053Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqqarymy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.103200482Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.103347109Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.103241323Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.103170172Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.103160163Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.102937146Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.103065984Z caller=client.go:80 msg="creating client for grafana instance" user=692285 addr=dns:///onepilotapp-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=info component=discovery ts=2024-05-29T13:44:14.102972883Z caller=client.go:80 msg="creating client for grafana instance" user=529458 addr=dns:///onemeeting-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqk50bm5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.102746747Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.102893892Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=245291 slug=pismo version=324 fingerprint=2625615801175b0a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.102835024Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.102590912s EvaluationString:}]" duration=227.335222ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqk50bm5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.102678527Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.10251258Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.245448ms
+ logger=ngalert.state.manager.persist user=114516 slug=heliumdashboard t=2024-05-29T13:44:14.102296978Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.143437ms
+ level=debug ts=2024-05-29T13:44:14.102366191Z caller=remote_instance_store.go:51 user=134245 slug=zume msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:14.102276652Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.339063ms
+ logger=ngalert.state.manager.persist user=134245 slug=zume t=2024-05-29T13:44:14.102290518Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqgktit5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.102325003Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.102252576Z caller=ruler.go:522 msg="tenant is owned by this instance" user=528225 slug=momoapp groups=0
+ logger=ngalert.state.manager user=134245 slug=zume instance= t=2024-05-29T13:44:14.10217132Z level=warn msg="Failed to take an image" dashboard=rojBOMHGz panel=11 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ level=debug ts=2024-05-29T13:44:14.102206593Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.102176069Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.102093177Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.817338ms
+ level=debug ts=2024-05-29T13:44:14.102118425Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.102129208Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.102090651Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:14.102097727Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.101948018Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qqa1agae-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101804388Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.10156607Z caller=ruler.go:522 msg="tenant is owned by this instance" user=637122 slug=mrgreenoffices groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qq5jglfd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101575695Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=747518 slug=dvevoli instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.101643922Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=747518 slug=dvevoli instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.101603692Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=747518 slug=dvevoli t=2024-05-29T13:44:14.101593252Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qq5ft788-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101393163Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.101410205Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qq5ft788-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101298172Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:14.101213667Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=539730 slug=meliora
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qq5ft788-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101263982Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpwhsuyr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101217222Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpwhsuyr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101189121Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=134245 slug=zume instance= t=2024-05-29T13:44:14.101139644Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:46:40Z next_ends_at=2024-05-29T13:47:40Z
+ level=debug ts=2024-05-29T13:44:14.101136395Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=134245 slug=zume t=2024-05-29T13:44:14.101049785Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpwhsuyr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.101141461Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.100987616Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpwhsuyr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.10103172Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.100835704Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=328755 slug=infogrideu version=1 fingerprint=d64f36c5f58da4b5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.100721635Z level=debug msg="Alert rule evaluated" results="[{Instance:DBClusterIdentifier=dagster-service-live, Role=READER State:Normal Error: Results:map[] Values:map[Expression:{Var:Expression Labels:DBClusterIdentifier=dagster-service-live, Role=READER Value:0xc00a0b41c8} Query:{Var:Query Labels:DBClusterIdentifier=dagster-service-live, Role=READER Value:0xc00a0b41e8} Reducer:{Var:Reducer Labels:DBClusterIdentifier=dagster-service-live, Role=READER Value:0xc00a0b4230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.10028366s EvaluationString:[ var='Expression' labels={DBClusterIdentifier=dagster-service-live, Role=READER} value=0 ], [ var='Query' labels={DBClusterIdentifier=dagster-service-live, Role=READER} value=14.218088475514218 ], [ var='Reducer' labels={DBClusterIdentifier=dagster-service-live, Role=READER} value=14.218088475514218 ]} {Instance:DBClusterIdentifier=dagster-service-live, Role=WRITER State:Normal Error: Results:map[] Values:map[Expression:{Var:Expression Labels:DBClusterIdentifier=dagster-service-live, Role=WRITER Value:0xc00a0b4318} Query:{Var:Query Labels:DBClusterIdentifier=dagster-service-live, Role=WRITER Value:0xc00a0b4288} Reducer:{Var:Reducer Labels:DBClusterIdentifier=dagster-service-live, Role=WRITER Value:0xc00a0b42d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.100302495s EvaluationString:[ var='Expression' labels={DBClusterIdentifier=dagster-service-live, Role=WRITER} value=0 ], [ var='Query' labels={DBClusterIdentifier=dagster-service-live, Role=WRITER} value=19.684317549210792 ], [ var='Reducer' labels={DBClusterIdentifier=dagster-service-live, Role=WRITER} value=19.684317549210792 ]}]" duration=19.6453ms
+ logger=ngalert.state.manager.persist user=87780 slug=zencloudandhosting t=2024-05-29T13:44:14.10089712Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpvmze7o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.100863628Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpuum7lq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.100792767Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.100780605Z caller=remote_image_capturer.go:61 user=87780 slug=zencloudandhosting rule_org_id=1 rule_uid=d09dde8d-af50-411d-90f9-4cd9acfaa976 dashboard=tb1KsO3Mz panel=10 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpuum7lq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.100764877Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.100770875Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.100687095Z caller=remote_instance_store.go:51 user=87052 slug=polystream msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.100627371Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpug5zix-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.100584105Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.10050279Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpug5zix-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.100479474Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.100336558Z caller=ruler.go:522 msg="tenant is owned by this instance" user=604389 slug=mikroelektronika groups=0
+ logger=ngalert.state.manager user=20177 slug=paddledash t=2024-05-29T13:44:14.100389006Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qptl60gd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.100351783Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qptl60gd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.100294882Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:14.100163257Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=491568 slug=mintumapp
+ level=debug ts=2024-05-29T13:44:14.100121033Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance= t=2024-05-29T13:44:14.100016631Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpnu4qhg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.099928218Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qpnu4qhg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.099787487Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting t=2024-05-29T13:44:14.09990295Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=87052 slug=polystream instance= t=2024-05-29T13:44:14.099903254Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=warn ts=2024-05-29T13:44:14.099883154Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=660882 slug=naron
+ level=debug ts=2024-05-29T13:44:14.09985168Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.099804706Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.099708209Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.099673621Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.099542636Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.93725ms
+ level=debug ts=2024-05-29T13:44:14.099558222Z caller=remote_instance_store.go:51 user=663231 slug=ryelabs msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.09948307Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.099517542Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=663231 slug=ryelabs instance="datasource_uid=grafanacloud-logs, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.099462441Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=663231 slug=ryelabs instance="datasource_uid=grafanacloud-logs, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:14.099445849Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:14.09944753Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=663231 slug=ryelabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.099429101Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=884866 slug=cnonumerique t=2024-05-29T13:44:14.099396839Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.099424457Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=663231 slug=ryelabs t=2024-05-29T13:44:14.099409677Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=warn ts=2024-05-29T13:44:14.099368249Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=807971 slug=nanotech
+ level=debug ts=2024-05-29T13:44:14.099365069Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=824501 slug=bendingspoons t=2024-05-29T13:44:14.099365859Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=60.722428ms
+ level=debug ts=2024-05-29T13:44:14.099339049Z caller=ruler.go:522 msg="tenant is owned by this instance" user=807971 slug=nanotech groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp9rhgrb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.099308002Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp9rhgrb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.099292372Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp9rhgrb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09915997Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp50652r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09910113Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp50652r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098995759Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp4gm5hn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098939668Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp4gm5hn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098865787Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qp4gm5hn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098728966Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:14.098928645Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=713296 slug=mediakindazuredev
+ logger=ngalert.state.manager.persist user=134245 slug=zume t=2024-05-29T13:44:14.098854357Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.098856544Z caller=ruler.go:522 msg="tenant is owned by this instance" user=751700 slug=middlewaresonu groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qozq7wlx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098655945Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qozq7wlx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098619745Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist
user=696798 slug=mcv t=2024-05-29T13:44:14.09871005Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.476526ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qot1os7f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098472833Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qot1os7f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098360382Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.09855759Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qophtswf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09817318Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qophtswf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09816392Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qophtswf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098087849Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qooqs1hs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.098046379Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.098467049Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.09836694Z 
caller=ruler.go:522 msg="tenant is owned by this instance" user=506909 slug=mequen groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qoolwaki-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.097879867Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.098248214Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.098035837Z caller=ruler.go:522 msg="tenant is owned by this instance" user=547178 slug=minizaggi groups=0 + level=debug ts=2024-05-29T13:44:14.097940384Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.098010555Z caller=remote_instance_store.go:51 user=127400 slug=fifthdomain msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.097973615Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=127400 slug=fifthdomain instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.097895761Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=127400 slug=fifthdomain t=2024-05-29T13:44:14.097834188Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=vre2p, env=auth-prod, instance=vre2p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.097717898Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.097626942Z caller=remote_instance_store.go:51 user=634268 slug=monitoringusdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.097548132Z caller=ruler.go:522 msg="tenant is owned by this instance" user=528533 slug=mcsscm groups=0 + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=vre2p, env=auth-prod, instance=vre2p, job=integrations/node_exporter, service_name=am" t=2024-05-29T13:44:14.09759267Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.097583366Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qof1tvao-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.097552204Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qof1tvao-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.097524334Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:14.097491761Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=vre1p, env=auth-prod, instance=vre1p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.097449723Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=vre1p, env=auth-prod, instance=vre1p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.097435851Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.097364198Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.097151824Z caller=remote_instance_store.go:51 user=512398 slug=brightdigital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qocrmyr3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.097333562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=612525 slug=adleyeview version=152 fingerprint=033d766563a30daf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.097237245Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc00d782a80} D:{Var:D Labels: Value:0xc00d782a88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.089264374s EvaluationString:[ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=1.180622362s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qocrmyr3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.097311422Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qocrmyr3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.097282451Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=vre1p, env=auth-prod, instance=vre1p, job=integrations/node_exporter, service_name=am" t=2024-05-29T13:44:14.097293727Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.097215546Z level=debug 
msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.863348ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qob9ssqn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09720453Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=usu2p, env=auth-prod, instance=usu2p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.097139611Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qob9ssqn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09711797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qo6t0dcv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.097035829Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qo6t0dcv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.096954408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qo6t0dcv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.096932378Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qo5zouo6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.096841037Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qo5huhiq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.096759946Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=use2p, env=auth-prod, instance=use2p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.096588635Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.096514953Z level=debug msg="Saving alert states done" count=5 max_state_save_concurrency=1 duration=181.39466ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qo1og4if-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.096529223Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.096367421Z caller=client.go:80 msg="creating client for grafana instance" user=707175 addr=dns:///nexi-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:14.09631382Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=492975 slug=mdgtest + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=use1p, env=auth-prod, instance=use1p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.096335283Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=use1p, env=auth-prod, instance=use1p, job=integrations/node_exporter, service_name=ds" t=2024-05-29T13:44:14.09619935Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnsctolh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.096081239Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.096000303Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=tsu1p, env=auth-prod, instance=tsu1p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.096063207Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.096004289Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnsctolh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095980578Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnr7f47y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095902007Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.095812912Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.620999ms + level=debug ts=2024-05-29T13:44:14.095781219Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnr7f47y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095811996Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.09568281Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.095638496Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnol7xuz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095556904Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnol7xuz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095496443Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=rsu1p, env=auth-prod, instance=rsu1p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.09545307Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=rsu1p, env=auth-prod, instance=rsu1p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.095431988Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:14.095470206Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qno3lkup-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095363912Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.095325272Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.095327038Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.095323924Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.09516834Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qno3lkup-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095275431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnnzoin1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09520739Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=rsu1p, env=auth-prod, instance=rsu1p, job=integrations/node_exporter, service_name=rs" t=2024-05-29T13:44:14.095248022Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.095072067Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.095110673Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.095130862Z caller=remote_instance_store.go:51 user=87780 slug=zencloudandhosting msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=rse1p, env=auth-prod, instance=rse1p, job=integrations/node_exporter, service_name=ssh" t=2024-05-29T13:44:14.09508743Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnkl91au-termination-metadata-pv, 
phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.095020048Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnkl91au-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.094990648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnkl91au-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.094968437Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnhyb8qn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.094726485Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.094760006Z caller=ruler.go:522 msg="tenant is owned by this instance" user=523927 slug=manorecom groups=1 + logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.094714175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=633335 slug=promqlworkshop t=2024-05-29T13:44:14.094646484Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.094413892Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnbj13ob-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.094483683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnbj13ob-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.094459792Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.0945494Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnbj13ob-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.094319561Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=csu1p, env=auth-prod, instance=csu1p, job=integrations/node_exporter, service_name=ds" t=2024-05-29T13:44:14.09450675Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.094467576Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnae4q0t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09428495Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.094488091Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnae4q0t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09421508Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qnae4q0t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.094118319Z level=debug msg="Keeping state" state=Normal + Error parsing panelUID for alert annotation ruleID=1975 dash= actualerror=strconv.ParseInt: parsing "": invalid syntax + logger=ngalert.state.manager.persist user=152655 slug=orbweaver t=2024-05-29T13:44:14.09442565Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.7556ms + level=debug ts=2024-05-29T13:44:14.094425106Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qna4dqpc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093897907Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.094314535Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.080939ms + level=debug ts=2024-05-29T13:44:14.094238111Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qn0rh1hf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093760795Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qn0rh1hf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093692794Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.094215954Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.094246Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmzale22-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093575733Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=536824 slug=forgerockit instance="__name__=service_up, agent_hostname=cse1p, env=auth-prod, instance=cse1p, job=integrations/node_exporter, service_name=ds" t=2024-05-29T13:44:14.094246744Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.094200531Z caller=remote_alert_sender.go:94 user=114492 slug=railsbank host=railsbank-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.253.115:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=0vXwavBVk alerts=1 + level=debug ts=2024-05-29T13:44:14.094196413Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmzale22-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093538583Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmvhxh6w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093472072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmvhxh6w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093406111Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.09416234Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmucne28-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093188489Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmucne28-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093136059Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.09407106Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmua7qbd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.093021018Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.09402289Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.09400696Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.093939388Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + Error parsing panelUID for alert annotation ruleID=1589 dash= actualerror=strconv.ParseInt: parsing "": invalid syntax + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.093784989Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=43.202444ms + level=debug ts=2024-05-29T13:44:14.09362353Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=536824 slug=forgerockit 
instance="__name__=service_up, agent_hostname=ame1p, env=auth-prod, instance=ame1p, job=integrations/node_exporter, service_name=am" t=2024-05-29T13:44:14.093559265Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.093282225Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmpq8wof-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.092764075Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmhquup8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.092721444Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmhquup8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.092642434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmhquup8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.092609743Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qmhquup8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.092568693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.09251186Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.901168ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qm17pzr8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.092041717Z level=debug msg="Setting 
next state" handler=resultNormal + logger=ngalert.state.manager user=237629 slug=ocrolus instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.091995684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=IRELAND Query" t=2024-05-29T13:44:14.091967807Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qm0xny44-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091970437Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=IRELAND Query" t=2024-05-29T13:44:14.091947256Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=237629 slug=ocrolus t=2024-05-29T13:44:14.09193944Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=warn ts=2024-05-29T13:44:14.091827978Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=543033 slug=maneshipocrates + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qm0xny44-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091848205Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.091809678Z caller=ruler.go:522 msg="tenant is owned by this instance" user=523929 slug=maescloud groups=0 + level=debug ts=2024-05-29T13:44:14.091442974Z caller=ruler.go:522 msg="tenant is owned by this instance" user=543033 slug=maneshipocrates groups=0 + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=26bbb316a04e5758 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.091721182Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=IRELAND Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0aeea42a8} Threshold:{Var:Threshold Labels: Value:0xc0aeea42e0} compare:{Var:compare Labels:aggregatedBy=sum, name=IRELAND Query Value:0xc0aeea4218} sum:{Var:sum Labels:aggregatedBy=sum, name=IRELAND Query Value:0xc0aeea4290}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.091251059s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={aggregatedBy=sum, name=IRELAND Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=IRELAND Query} value=0 ]}]" duration=79.582277ms + level=warn ts=2024-05-29T13:44:14.091702977Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=503469 slug=kasyx + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qlweqjyp-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091705684Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qlweqjyp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091660904Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:14.091658776Z caller=client.go:80 msg="creating client for grafana instance" user=636819 addr=dns:///mrdev-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.091459474Z caller=ruler.go:522 msg="tenant is owned by this instance" user=644911 slug=legate groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qlqj1nn3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091502262Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.091444374Z caller=client.go:80 msg="creating client for grafana instance" user=504711 addr=dns:///mipnamic-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qlqj1nn3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091439631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qlj2uhho-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09132156Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.09094397Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=673696 slug=itmaster + level=info component=discovery ts=2024-05-29T13:44:14.091226672Z caller=client.go:80 msg="creating client for grafana instance" user=547178 addr=dns:///minizaggi-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:14.091202172Z caller=ruler.go:522 msg="tenant is owned by this instance" user=556868 slug=levvr groups=2 + level=info component=discovery ts=2024-05-29T13:44:14.091158172Z caller=client.go:80 msg="creating client 
for grafana instance" user=604389 addr=dns:///mikroelektronika-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ql95jvdt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091152088Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ql95jvdt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091121798Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ql7x30xa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091054497Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ql7x30xa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.091018467Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ql7x30xa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090995347Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ql7x30xa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090955626Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.090802168Z caller=ruler.go:522 msg="tenant is owned by this instance" user=686926 slug=luciabotoaca groups=1
+ level=warn ts=2024-05-29T13:44:14.09095717Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=738824 slug=lucasjuillard
+ level=info component=discovery ts=2024-05-29T13:44:14.090847169Z caller=client.go:80 msg="creating client for grafana instance" user=634896 addr=dns:///mgbmcapprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=info ts=2024-05-29T13:44:14.090831968Z caller=grafana.go:247 user=843304 slug=ppcgroup msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=2 alerts=0
+ level=info component=discovery ts=2024-05-29T13:44:14.090820468Z caller=client.go:80 msg="creating client for grafana instance" user=634891 addr=dns:///mgbmcapeng-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:14.090778168Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=664981 slug=matthewwall
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ql6iwiv9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090634053Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkx2qzc2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090593563Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkx2qzc2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090567792Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkx2qzc2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090538142Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.0905161Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:14.090503765Z caller=client.go:80 msg="creating client for grafana instance" user=507602 addr=dns:///meronz-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkx2qzc2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090488332Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.090401631Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.09034145Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.090281859Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.090354381Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:14.090255387Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkwkfkxi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.09032372Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkwkfkxi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.090255809Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=9c6a141cfb62842b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.09006546Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.089605724s EvaluationString:}]" duration=12.692577ms
+ level=debug ts=2024-05-29T13:44:14.089999469Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.08991465Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkkzz4hg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089868555Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.089965787Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.089902515Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qkkzz4hg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089808695Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qk6rf58n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089779884Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.089865321Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.089751693Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qk6rf58n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089661233Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qk5ssg27-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089628163Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qk5ssg27-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089552562Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.089315343Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qjt6rinb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089265739Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance="ClientId=665993615218, DomainName=preprd-logs" t=2024-05-29T13:44:14.089228492Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.089180411Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qjt4uxp4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089119368Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qjt4uxp4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089086477Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qjt4uxp4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.089030097Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.08896636Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qjqhm174-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.088906705Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.088709527Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qjf4ugns-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.088684853Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.088555965Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj6seqpr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.088496131Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj6seqpr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.088452631Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj4wrb85-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.088289779Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.088183006Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.088002082Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.088024107Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj4147nx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.088025406Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj4147nx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087993296Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj38ana9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087924385Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj38ana9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087880065Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj38ana9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087850605Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj38ana9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087784344Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.087664668Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj2tygj2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087720683Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj2tygj2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087691783Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.087567151Z caller=remote_instance_store.go:51 user=698103 slug=vericast msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj2tygj2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087569632Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.087543313Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.087529317Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.08751566Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj109fu5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087493221Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698103 slug=vericast t=2024-05-29T13:44:14.087464396Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj109fu5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.08743817Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qj109fu5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.0874273Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID968dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:14.087316644Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.662298ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qiryk2pa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087309439Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qiryk2pa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087278389Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qiryk2pa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087236568Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.087146037Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.087009927Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.086997928Z caller=grafana.go:247 user=843304 slug=ppcgroup msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=69 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qiqt22y8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.087005656Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qiqt22y8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.086920715Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.08694354Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.086852979Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qiqt22y8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.086867584Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qict01dt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.086702493Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.08679681Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:14.086317388Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qi6itexs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.086297669Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qhv15mxm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.086209278Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qhv15mxm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.086120097Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qhv15mxm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.086080616Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qhpp8bme-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.085983825Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.085805223Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qhjh9zpm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.085800263Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qhdii2me-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.085382749Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.085212163Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qh3gn0ez-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.085216697Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.08518127Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.085057306Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.085053812Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qh3gn0ez-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.085059356Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgx14sqj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084960845Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.084872426Z caller=remote_instance_store.go:51 user=664976 slug=staging1themomproject msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgx14sqj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084828563Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgsvg709-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084800093Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgsvg709-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084790673Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=664976 slug=staging1themomproject instance="datasource_uid=grafanacloud-prom, ref_id=query" t=2024-05-29T13:44:14.084790635Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=664976 slug=staging1themomproject t=2024-05-29T13:44:14.084769724Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.084709475Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=664976 slug=staging1themomproject version=13 fingerprint=754855ad61f4dc6f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.084702473Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=query State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.08434469s EvaluationString:}]" duration=11.123534ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgp1a5q3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084579431Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.08457954Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=18335 slug=semaphore instance= t=2024-05-29T13:44:14.084599258Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=18335 slug=semaphore instance= t=2024-05-29T13:44:14.084587648Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgjgeoo0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084301678Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgjgeoo0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084270818Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.084296134Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(eadp.gos.torch.prod.battlefield-1-ps4.Gameplay_Users,3) Query" t=2024-05-29T13:44:14.084214657Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(eadp.gos.torch.prod.battlefield-1-ps4.Gameplay_Users,3) Query" t=2024-05-29T13:44:14.084197133Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=807171 slug=unstarnp t=2024-05-29T13:44:14.083845538Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.939476ms
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=2670f11c8fbcd742 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.083991921Z level=debug msg="Alert rule evaluated" results="[{Instance:name=keepLastValue(eadp.gos.torch.prod.battlefield-1-ps4.Gameplay_Users,3) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0051f12c0} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc0051f12c8} Threshold:{Var:Threshold Labels: Value:0xc0051f1270} compare:{Var:compare Labels:name=keepLastValue(eadp.gos.torch.prod.battlefield-1-ps4.Gameplay_Users,3) Query Value:0xc0051f1290} sum:{Var:sum Labels:name=keepLastValue(eadp.gos.torch.prod.battlefield-1-ps4.Gameplay_Users,3) Query Value:0xc0051f12a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.083526069s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=100 ], [ var='Threshold' labels={} value=-10 ], [ var='compare' labels={name=keepLastValue(eadp.gos.torch.prod.battlefield-1-ps4.Gameplay_Users,3) Query} value=0 ], [ var='sum' labels={name=keepLastValue(eadp.gos.torch.prod.battlefield-1-ps4.Gameplay_Users,3) Query} value=0 ]}]" duration=35.652205ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgfj8ntw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.084071975Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgddp3ht-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083998575Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=907609 slug=calerts version=68 fingerprint=8ad7fcbe92cbf51f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.083865008Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=node_systemd_unit_state, instance=localhost:9100, instance_name=acue-staging-app-auditcue, job=acue-staging-app-auditcue-systemd_service_export, name=acueapp-bootstrap.service, region=ap-south-1, state=failed, type=simple State:Normal Error: Results:map[] Values:map[Current service state:{Var:Current service state Labels:__name__=node_systemd_unit_state, instance=localhost:9100, instance_name=acue-staging-app-auditcue, job=acue-staging-app-auditcue-systemd_service_export, name=acueapp-bootstrap.service, region=ap-south-1, state=failed, type=simple Value:0xc00409e770} Threshold:{Var:Threshold Labels:__name__=node_systemd_unit_state, instance=localhost:9100, instance_name=acue-staging-app-auditcue, job=acue-staging-app-auditcue-systemd_service_export, name=acueapp-bootstrap.service, region=ap-south-1, state=failed, type=simple Value:0xc00409e6e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.083484204s EvaluationString:[ var='Current service state' labels={__name__=node_systemd_unit_state, instance=localhost:9100, instance_name=acue-staging-app-auditcue, job=acue-staging-app-auditcue-systemd_service_export, name=acueapp-bootstrap.service, region=ap-south-1, state=failed, type=simple} value=0 ], [ var='Threshold' labels={__name__=node_systemd_unit_state, instance=localhost:9100, instance_name=acue-staging-app-auditcue, job=acue-staging-app-auditcue-systemd_service_export, name=acueapp-bootstrap.service, region=ap-south-1, state=failed, type=simple} value=0 ]}]" duration=6.585843ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qgddp3ht-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083900804Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qg9bi6fw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083771282Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qg9bi6fw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083732032Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qg9bi6fw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083698292Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qfuvso0f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.08353877Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.083482571Z caller=remote_alert_sender.go:94 user=548157 slug=kushkiprod host=kushkiprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.209.249:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c28777e6-7313-4d3b-bd6f-0353a846a9ea alerts=1
+ level=info ts=2024-05-29T13:44:14.083449772Z caller=remote_alert_sender.go:94 user=174016 slug=journalstaging host=journalstaging-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.66.172:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=lWEns3dnz alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qf3u0053-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083355148Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qf3u0053-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083238947Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qf3u0053-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.083216747Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qezz7m1s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.082965514Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.082921126Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.082840812Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qezz7m1s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.082702841Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=633501 slug=y2engineering instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.082645323Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager.persist user=174675 slug=journalprod t=2024-05-29T13:44:14.082679217Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qetg7bjs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.082643171Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=633501 slug=y2engineering instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.082609321Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=633501 slug=y2engineering t=2024-05-29T13:44:14.082594433Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=633501 slug=y2engineering version=37 fingerprint=02ed9e3a75627c43 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.082533123Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.082196652s EvaluationString:}]" duration=12.751059ms
+ level=debug ts=2024-05-29T13:44:14.082503851Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.08252025Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qetg7bjs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.082487569Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qedvs0c7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.082362368Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.082220037Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qe6ji3t1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.082251487Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.082186009Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qe6ji3t1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.082180316Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.082053943Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081997397Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081552909Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081429173Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081499076Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.081438555Z caller=grafana.go:247 user=524518 slug=hfplumber msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=21a53bb9-e8a6-batDT" groups=0 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qdltduqx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.081308267Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.081390362Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081387733Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081331702Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081224374Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.081348711Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.081320289Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.081224224Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081206924Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.081163913Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qdj0021o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.081148585Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qdj0021o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.081116205Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qdg1l1c1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080996194Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qdg1l1c1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080926463Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qddewfoe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080859572Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qddewfoe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080818892Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qddewfoe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080763951Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.081018704Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=68499 slug=identt t=2024-05-29T13:44:14.081056659Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.140996ms
+ level=debug ts=2024-05-29T13:44:14.08100516Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.080960236Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.080774736Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.080845117Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.080822966Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qczoq51h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080561539Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.080423671Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qcp3d8bx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080294737Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qcp3d8bx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.080208436Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.080180123Z caller=remote_instance_store.go:51 user=543604 slug=kingmakers msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=543604 slug=kingmakers t=2024-05-29T13:44:14.080134622Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=543604 slug=kingmakers instance="cluster=aks-igaming-prod-weu-shared" t=2024-05-29T13:44:14.080121022Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qcn35jcy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079901783Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qckll23n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079789971Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.079956211Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qckll23n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079740841Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.08002105Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.07981479Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.261725ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qc9o6nua-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079473528Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qc6lvchu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079446708Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.079846952Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qc0k9i3q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079280126Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.079528062Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qc0k9i3q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079209665Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qc0k9i3q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.079171395Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.078992027Z caller=remote_instance_store.go:51 user=312340 slug=lakefs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qbwsa30a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.078951593Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=332555 slug=omexomcs instance= t=2024-05-29T13:44:14.078972345Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.078804414Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.078800769Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.078861357Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=terraform-locks" t=2024-05-29T13:44:14.078889254Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qbn3w9zw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.078730871Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.078780149Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="TableName=lakefs-replication-table" + Error parsing panelUID for alert annotationruleID756dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.078701987Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=66.293486ms + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:14.078666021Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.142694ms + level=debug ts=2024-05-29T13:44:14.078653121Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-wealthy-pug-qur9bw" t=2024-05-29T13:44:14.078646775Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.078606893Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.078621717Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-wealthy-pug-qur9bw" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qb96aff2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.078606549Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.07854723Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qb96aff2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.078576749Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-vague-stoa" t=2024-05-29T13:44:14.078563479Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=245291 slug=pismo version=30 fingerprint=4327d462a6fed3cf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.078483218Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.078238841s EvaluationString:}]" duration=475.835435ms + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.078534405Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-vague-stoa" + logger=ngalert.state.manager.persist user=177453 slug=clabs t=2024-05-29T13:44:14.078439593Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=52.8543ms + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-testy-cow" t=2024-05-29T13:44:14.078379734Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.078358271Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-testy-cow" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qb6u7of5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.078310436Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qb6u7of5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.078284986Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:14.078203808Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=41.257827ms + logger=ngalert.state.manager user=811546 slug=fyld instance="QueueName=sitestream-staging-fyld-brain-event-bus" t=2024-05-29T13:44:14.078168619Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.078130374Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-sts" + logger=ngalert.state.manager user=811546 slug=fyld instance="QueueName=celery-notifications" t=2024-05-29T13:44:14.078135288Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.078055193Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=811546 slug=fyld instance="QueueName=DeadLetters" t=2024-05-29T13:44:14.078073266Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=235895 slug=nathanprenzler instance="__name__=ALERTS, alertname=SNMPTargetDown, alertstate=firing, instance=192-168-1-120.tpgi.com.au, job=integrations/snmp/Starlight, job_snmp=integrations/snmp, severity=critical, snmp_target=Starlight" t=2024-05-29T13:44:14.078129723Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:48:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.077984641Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qb3hrh60-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.077964863Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=235895 slug=nathanprenzler instance="__name__=ALERTS, alertname=PromScrapeFailed, alertstate=firing, instance=fedora, job=integrations/kafka, kafka_cluster=my-cluster, severity=critical" t=2024-05-29T13:44:14.078006531Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=235895 slug=nathanprenzler instance="__name__=ALERTS, alertname=PromScrapeFailed, alertstate=firing, instance=fedora, job=integrations/kafka, kafka_cluster=my-cluster, severity=critical" t=2024-05-29T13:44:14.077976507Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=235895 slug=nathanprenzler t=2024-05-29T13:44:14.077925488Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=prna" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qazgrqcd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.077850542Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qazgrqcd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.077823261Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-rigid-star" t=2024-05-29T13:44:14.077810074Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=235895 slug=nathanprenzler t=2024-05-29T13:44:14.077806659Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=prna" + level=debug ts=2024-05-29T13:44:14.077774108Z caller=remote_image_capturer.go:33 user=235895 slug=nathanprenzler rule_org_id=1 rule_uid=b22cd600-30b2-48b1-b8e6-ecda6741327e msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.077708501Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-rigid-star" + logger=ngalert.state.manager user=235895 slug=nathanprenzler instance="__name__=ALERTS, alertname=NodejsDown, alertstate=firing, severity=critical" t=2024-05-29T13:44:14.077690376Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=235895 slug=nathanprenzler t=2024-05-29T13:44:14.077631024Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=prna" + Error parsing panelUID for alert annotationruleID2008dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.077565613Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=250.371263ms + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.077563986Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=76.432513ms + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-rich-pony-1grudx" t=2024-05-29T13:44:14.077543468Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.077542558Z caller=remote_image_capturer.go:33 user=235895 slug=nathanprenzler rule_org_id=1 rule_uid=b22cd600-30b2-48b1-b8e6-ecda6741327e msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.07751618Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-rich-pony-1grudx" + logger=ngalert.state.manager user=235895 slug=nathanprenzler instance="__name__=ALERTS, alertname=CoreDNSDown, alertstate=firing, severity=critical" t=2024-05-29T13:44:14.077496591Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-renewing-kingfish-d22q0h" t=2024-05-29T13:44:14.077431996Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.077414872Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-renewing-kingfish-d22q0h" + logger=ngalert.state.manager user=235895 slug=nathanprenzler t=2024-05-29T13:44:14.077396361Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=prna" + level=debug ts=2024-05-29T13:44:14.077311024Z caller=remote_image_capturer.go:33 user=235895 slug=nathanprenzler rule_org_id=1 rule_uid=b22cd600-30b2-48b1-b8e6-ecda6741327e msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-quality-bedbug-1usiqf" t=2024-05-29T13:44:14.077266388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-powerful-goose-gk1f8c" t=2024-05-29T13:44:14.077149749Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qafkzbbb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.077220165Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=235895 slug=nathanprenzler t=2024-05-29T13:44:14.077222307Z level=debug msg="State manager processing evaluation results" resultCount=7 + level=info ts=2024-05-29T13:44:14.077124521Z caller=grafana.go:247 user=524518 slug=hfplumber msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=21a53bb9-e8a6-batDT" groups=0 alerts=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qafkzbbb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.077114204Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.077082398Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.077134826Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qafkzbbb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.077089584Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.076963707Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-pleased-muskrat-34euh7" t=2024-05-29T13:44:14.077059877Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.077059747Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.076994758Z 
caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.077003207Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=EMA9p0nVk, ref_id=A" t=2024-05-29T13:44:14.076994695Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.076944464Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.076922084Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-pl-gc" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qa86j8zk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076871501Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.076866148Z caller=remote_alert_sender.go:94 user=84360 slug=sib host=sib-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.245.254:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=FK1-QCm4z alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qa86j8zk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076861881Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qa86j8zk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076833611Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-qa86j8zk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076824051Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q9yh4qyf-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076786441Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q9v1j9wy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076595249Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q9ux6ocx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076506548Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-loyal-wallaby-qv0ffu" t=2024-05-29T13:44:14.07651224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-loyal-wallaby-qv0ffu" t=2024-05-29T13:44:14.076499711Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.076343156Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-leading-le" t=2024-05-29T13:44:14.076365648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q9iklhe4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076326876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q9iklhe4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076298116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.076294009Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-leading-le" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q9iklhe4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076258875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q9iklhe4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.076167604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-intimate-mouse-8sqpzx" t=2024-05-29T13:44:14.076229157Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.076016161Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q95eopau-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075902302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj" t=2024-05-29T13:44:14.07589441Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-honest-chigger-hcbrhj" t=2024-05-29T13:44:14.075883242Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.07583441Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q95eopau-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075846061Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8x1fzml-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.07578331Z level=debug 
msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.075785913Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8x1fzml-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.07574637Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut" t=2024-05-29T13:44:14.075756804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut" t=2024-05-29T13:44:14.07573312Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.075692525Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-harmless-kitten-peaxut" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8x1fzml-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075683009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8x1fzml-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075654999Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.075498809Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8srg8fs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075534378Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.075498738Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.075525241Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-evident-wh" t=2024-05-29T13:44:14.075448671Z level=debug msg="Keeping 
state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-evident-wh" t=2024-05-29T13:44:14.07544239Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8srg8fs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075469157Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.075392825Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8m82rol-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075432457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8m82rol-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075401006Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8m82rol-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075327346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.075302387Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-epic-monster-gdiaz1" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8m82rol-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075241195Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-enhanced-bedbug-v1vdsc" t=2024-05-29T13:44:14.075131688Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8k43nq1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.075197404Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8bpa591-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074984992Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8bpa591-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074946622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q8bpa591-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074872501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.074907862Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-devoted-mackerel-av9yz5" + logger=ngalert.scheduler user=807171 slug=unstarnp version=9 fingerprint=e6011b4bc03142a0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.074752491Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.074449142s EvaluationString:}]" duration=11.45997ms + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-treeverse-definite-gnat-7k4t60" t=2024-05-29T13:44:14.074839812Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.074813189Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.07479808Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q88kg5wg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074714849Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.074736742Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.074709985Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse-current-ostrich-sv74vd" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q86ztjjz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074520317Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q86ztjjz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074485797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q86ztjjz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074462157Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q85z2nlg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074340286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q85z2nlg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074301485Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.07428056Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-treeverse" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-similarweb" t=2024-05-29T13:44:14.074128732Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q84jkol2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.074159804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.074105574Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-similarweb" + level=debug ts=2024-05-29T13:44:14.073990908Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.073927969Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.073991962Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-ruling-poodle-iwf8zg" + level=debug ts=2024-05-29T13:44:14.073919579Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:14.073909191Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-precious-akita-yb6fqc" t=2024-05-29T13:44:14.073920755Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q80phaw6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.073859571Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=109452 slug=deltarisk version=12 fingerprint=cf992974f85d32c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.073749214Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.073374569s EvaluationString:}]" duration=56.395592ms + level=debug ts=2024-05-29T13:44:14.073839403Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-mighty-vulture-mgmyhb" t=2024-05-29T13:44:14.073814805Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q80bnh05-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.073730109Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q80bnh05-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.073691019Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q80bnh05-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.073604058Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.073548949Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-mighty-iguana-iz3nkf" + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-living-gelding-664vr0" t=2024-05-29T13:44:14.073472539Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-innocent-sunbeam-uy8k35" t=2024-05-29T13:44:14.073394539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-immense-husky-hz3ybb" t=2024-05-29T13:44:14.073284468Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-helpful-boxer-x10u0j" t=2024-05-29T13:44:14.07321469Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.073027876Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q7hdmuxr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072874991Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q7hdmuxr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.07284082Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.072743225Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-enigma"
+ level=debug ts=2024-05-29T13:44:14.072736425Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.072667984Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-driving-albacore-f6p007"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q7f3ib5p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072589298Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.072515835Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.07256946Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.072466278Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-custom1" t=2024-05-29T13:44:14.072557397Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.0725297Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-custom1"
+ level=debug ts=2024-05-29T13:44:14.072461807Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q7f3ib5p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072448116Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-cunning-insect-c6gj8j" t=2024-05-29T13:44:14.072467078Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-cunning-insect-c6gj8j" t=2024-05-29T13:44:14.072460342Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.072416881Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6rpproh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072357185Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6rpproh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072282264Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.072311054Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6rpproh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072230604Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6r2xdxn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072155893Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6r2xdxn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072055002Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6r2xdxn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.072011752Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-close-sunbird-fhwmpw" t=2024-05-29T13:44:14.072167025Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.072197533Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6q1kqvh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071913181Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6q1kqvh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.07186792Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q6egw0jg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071726309Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-close-sunbird-fhwmpw" t=2024-05-29T13:44:14.072156879Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q67rzgxx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071437946Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q67rzgxx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071366535Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.072040007Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-bursting-hen-xau574"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q66hnku4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071222304Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q66hnku4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071145933Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q66hnku4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071119243Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q65zy1z2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071060562Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q65zy1z2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071047822Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q65zy1z2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.071008151Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q65zy1z2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.07091583Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q64eiygt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070699168Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.071951933Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-arm-dev"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q62gx7cz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070620787Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-arm" t=2024-05-29T13:44:14.071911647Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-arm" t=2024-05-29T13:44:14.071904807Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.071887392Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-arm"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q601auu0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070469976Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q601auu0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070460506Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5zovjp6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070428245Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5zovjp6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070419465Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5ywf41b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070302964Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5ywf41b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070235583Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5ywf41b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070225863Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5xb1m5y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070109502Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-allowed-frog-819x4t" t=2024-05-29T13:44:14.071697056Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5w217im-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.070029261Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5w217im-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069998891Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5w217im-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069989441Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5w1rkvx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069961031Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5w1rkvx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06992294Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5w1rkvx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06991333Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.071594231Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5w1rkvx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06987573Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=lakefs-cloud-refstore-aidn" t=2024-05-29T13:44:14.071592215Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5r65tub-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069752198Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5qg1fov-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069709278Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=312340 slug=lakefs t=2024-05-29T13:44:14.071530269Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="TableName=lakefs-cloud-refstore-aidn"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5qg1fov-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069630677Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5m0kc20-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069596717Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5m0kc20-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069546566Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5m0kc20-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069529316Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.071529423Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.071469761Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5lnzrbl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069445495Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5lnzrbl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069408405Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5hopmhz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069304334Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5hopmhz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069264803Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=312340 slug=lakefs instance="TableName=control-plane" t=2024-05-29T13:44:14.071245759Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5hh2dxz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069139882Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5hfcsha-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069102952Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5hfcsha-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069093122Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5hfcsha-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.069023671Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5fqs5wt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06893308Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5fqs5wt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068876549Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5fqs5wt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068821269Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5f7pp60-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068774748Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5f7pp60-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068765668Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5f7pp60-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068693297Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5f7pp60-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068683687Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5arvggi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068652297Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q5arvggi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068562236Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q56jysxi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068511926Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q56jysxi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068472605Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q54bcme4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068281203Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q54bcme4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068248083Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q54bcme4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.068191022Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=548157 slug=kushkiprod instance="datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A" t=2024-05-29T13:44:14.070835223Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:00Z next_ends_at=2024-05-29T13:48:00Z
+ logger=ngalert.state.manager user=548157 slug=kushkiprod t=2024-05-29T13:44:14.070723662Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=902357 slug=tonvalidators instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.070664628Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.070601665Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.070579987Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.070579389Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.070536747Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.070450354Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=472647 slug=planet instance= t=2024-05-29T13:44:14.070138171Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:14.070102462Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:14.069694021Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.069431437Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.069168267Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.069032137Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:14.068928616Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c1fa17b2-0669-4466-9924-665a115c2609 alerts=1
+ level=info ts=2024-05-29T13:44:14.068856936Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.83.87:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c1fa17b2-0669-4466-9924-665a115c2609 alerts=1
+ level=debug ts=2024-05-29T13:44:14.068755744Z caller=remote_instance_store.go:51 user=174016 slug=journalstaging msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=174016 slug=journalstaging t=2024-05-29T13:44:14.06871808Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:14.068665831Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=174016 slug=journalstaging instance="datasource_uid=bYQmLgyGz, ref_id=A" t=2024-05-29T13:44:14.068703615Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=174016 slug=journalstaging instance="datasource_uid=bYQmLgyGz, ref_id=A" t=2024-05-29T13:44:14.068690379Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=174016 slug=journalstaging version=1 fingerprint=de1f6680ed0b0dd2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.06858738Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bYQmLgyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.068245003s EvaluationString:}]" duration=15.218311ms
+ logger=ngalert.state.manager user=432323 slug=lithic instance="LoadBalancer=app/papi-lb-live-lb/12eccf05e78e0cfd" t=2024-05-29T13:44:14.068589906Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:14.068625845Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-DATA-FIXES-STATUS-UPDATES-SQS" t=2024-05-29T13:44:14.068598614Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.068465998Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.06850355Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=114492 slug=railsbank version=6 fingerprint=ac2792345e6b411f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.068476805Z level=debug msg="Alert rule evaluated" results="[{Instance:QueueName=PROD-DATA-FIXES-STATUS-UPDATES-SQS State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:QueueName=PROD-DATA-FIXES-STATUS-UPDATES-SQS Value:0xc0323a0710} C:{Var:C Labels:QueueName=PROD-DATA-FIXES-STATUS-UPDATES-SQS Value:0xc0323a0718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.068224072s EvaluationString:[ var='B' labels={QueueName=PROD-DATA-FIXES-STATUS-UPDATES-SQS} value=0 ], [ var='C' labels={QueueName=PROD-DATA-FIXES-STATUS-UPDATES-SQS} value=0 ]}]" duration=472.312632ms
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=auth-worker-worker, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=auth-worker-worker, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development" t=2024-05-29T13:44:14.068537874Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.068490304Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="region=eu-west-1, service=kube-state-metrics, stage=development"
+ logger=ngalert.scheduler user=698963 slug=lemonade version=3 fingerprint=7397269616cc60a7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.068259406Z level=debug msg="Alert rule evaluated" results="[{Instance:app=auth-worker-worker, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=auth-worker-worker, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=auth-worker-worker, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=auth-worker-worker, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc0237d4610} THRESHOLD:{Var:THRESHOLD Labels:app=auth-worker-worker, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=auth-worker-worker, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development Value:0xc0237d4550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.067800363s EvaluationString:[ var='QUERY' labels={app=auth-worker-worker, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=auth-worker-worker, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=0 ], [ var='THRESHOLD' labels={app=auth-worker-worker, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=auth-worker-worker, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development} value=0 ]}]" duration=59.927247ms
+ level=debug ts=2024-05-29T13:44:14.068206457Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:14.068098137Z level=debug msg="Deleting alert states" count=1
+ logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.068077219Z level=info msg="Detected stale state entry" cacheID="[[\"Series\",\"queryede4ce8a5ee24a2bb913de1fe422989d\"],[\"__alert_rule_namespace_uid__\",\"MLLy09GMk\"],[\"__alert_rule_uid__\",\"bec05735-8aee-4e8b-b8d9-589c3aee9b12\"],[\"__contacts__\",\"\\\"DemandProjectAPI Slack\\\"\"],[\"alertname\",\"[Dev] demand-project-search OS cluster status RED alert\"],[\"grafana_folder\",\"Team-Marketplace\"]]" state=Normal reason=
+ level=debug ts=2024-05-29T13:44:14.068176215Z caller=remote_instance_store.go:51 user=114516 slug=heliumdashboard msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114516 slug=heliumdashboard t=2024-05-29T13:44:14.068092825Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=114516 slug=heliumdashboard version=94 fingerprint=d2a3c92fadabf40b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.068014506Z level=debug msg="Alert rule evaluated" results="[{Instance:Env=mainnet, Role=verifier, Stack=iot, agent_hostname=mainnet-iot-verifier0-oregon, instance=mainnet-iot-verifier0-oregon:19001, job=iot_verifier_metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:Env=mainnet, Role=verifier, Stack=iot, agent_hostname=mainnet-iot-verifier0-oregon, instance=mainnet-iot-verifier0-oregon:19001, job=iot_verifier_metrics Value:0xc070c51ea0} C:{Var:C Labels:Env=mainnet, Role=verifier, Stack=iot, agent_hostname=mainnet-iot-verifier0-oregon, instance=mainnet-iot-verifier0-oregon:19001, job=iot_verifier_metrics Value:0xc070c51e18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.06763528s EvaluationString:[ var='A' labels={Env=mainnet, Role=verifier, Stack=iot, agent_hostname=mainnet-iot-verifier0-oregon, instance=mainnet-iot-verifier0-oregon:19001, job=iot_verifier_metrics} value=45850 ], [ var='C' labels={Env=mainnet, Role=verifier, Stack=iot, agent_hostname=mainnet-iot-verifier0-oregon, instance=mainnet-iot-verifier0-oregon:19001, job=iot_verifier_metrics} value=0 ]}]" duration=14.715339ms
+ logger=ngalert.state.manager user=112387 slug=lucidhq t=2024-05-29T13:44:14.067882336Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q4vzxndn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.067849339Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q4rl1zsk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.067664107Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q4rl1zsk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.067546965Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q4irnjdm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06700971Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.067499591Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q4bcn91e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066738887Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q45qut7o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066660866Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q45qut7o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066651626Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q45qut7o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066582326Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q43fcg18-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066541775Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q43fcg18-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066495045Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q42ol1zs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066385383Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:14.067200033Z caller=remote_alert_sender.go:94 user=228733 slug=csmoney host=csmoney-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.115.19:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fc3674ed-eb7f-437b-9715-c523a8adbac8 alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q42ol1zs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066343923Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q42ol1zs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066328343Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:14.067188764Z caller=remote_alert_sender.go:94 user=228733 slug=csmoney host=csmoney-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.68.215:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fc3674ed-eb7f-437b-9715-c523a8adbac8 alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q42ecjm2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066192562Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q42ecjm2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.066175781Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3vcg7x4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065984619Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3owff6g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065852878Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3owff6g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065842878Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.066940805Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:14.06691489Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3hjarvy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065663816Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3hjarvy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065566385Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3hjarvy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065542605Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:14.066954846Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3g8ia1b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065458664Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3g8ia1b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065420254Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3g8ia1b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065343463Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3f3rcth-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.065291142Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:14.066594441Z caller=remote_instance_store.go:51 user=512398 slug=brightdigital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 20 - D2 - LK2 4 P2/16 - geïnitialiseerd" t=2024-05-29T13:44:14.066535941Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 20 - D2 - LK2 4 P2/16 - Stroomuitval Alarm" t=2024-05-29T13:44:14.066506841Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=512398 slug=brightdigital t=2024-05-29T13:44:14.066452541Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Alert PMC Utrecht Kast 20- Links"
+ logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsalarm" t=2024-05-29T13:44:14.06640284Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 20 - D2 - LK2 4 P2/16 - Slave Reset" t=2024-05-29T13:44:14.06637884Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 20 - D2 - LK2 4 P2/16 - Slave Reset" t=2024-05-29T13:44:14.06637334Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=512398 slug=brightdigital t=2024-05-29T13:44:14.06635884Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Alert PMC Utrecht Kast 20- Links"
+ logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 20 - D2 - LK2 4 P2/16 - Power On Reset" t=2024-05-29T13:44:14.06633794Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=512398 slug=brightdigital t=2024-05-29T13:44:14.06632494Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Alert PMC Utrecht Kast 20- Links"
+ logger=ngalert.state.manager user=512398 slug=brightdigital t=2024-05-29T13:44:14.066235739Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Alert PMC Utrecht Kast 20- Links"
+ logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 20 - D2 - LK2 4 P2/16 - Externe Reset" t=2024-05-29T13:44:14.066213139Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=512398 slug=brightdigital t=2024-05-29T13:44:14.066191239Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Alert PMC Utrecht Kast 20- Links"
+ logger=ngalert.state.manager user=512398 slug=brightdigital t=2024-05-29T13:44:14.066136439Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=Alert PMC Utrecht Kast 20- Links"
+ logger=ngalert.scheduler user=512398 slug=brightdigital version=4 fingerprint=1d6e57927f985127 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.065879237Z level=debug msg="Alert rule evaluated" results="[{Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Algemeen Alarm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Algemeen Alarm Value:0xc0297bb658} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Algemeen Alarm Value:0xc0297bb668}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065780864s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Algemeen Alarm} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Algemeen Alarm} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Externe Reset State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Externe Reset Value:0xc0297bb778} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Externe Reset Value:0xc0297bb788}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065795564s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Externe Reset} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Externe Reset} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Hardware Error State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Hardware Error Value:0xc0297bb7d8} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Hardware Error Value:0xc0297bb7c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065800564s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Hardware Error} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Hardware Error} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Interne Error State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Interne Error Value:0xc0297bb7f8} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Interne Error Value:0xc0297bb818}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065805564s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Interne Error} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Interne Error} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Power On Reset State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Power On Reset Value:0xc0297bb868} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 -
Power On Reset} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Power On Reset} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Slave Reset State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Slave Reset Value:0xc0297bb898} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Slave Reset Value:0xc0297bb8a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065816964s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Slave Reset} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Slave Reset} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsalarm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsalarm Value:0xc0297bb8c8} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsalarm Value:0xc0297bb8d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065822064s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsalarm} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsalarm} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsuitval Alarm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsuitval Alarm Value:0xc0297bb8f8} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsuitval Alarm Value:0xc0297bb908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065826864s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsuitval Alarm} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Spanningsuitval Alarm} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Stroom Alarm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Stroom Alarm Value:0xc0297bb928} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Stroom Alarm Value:0xc0297bb938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065835764s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Stroom Alarm} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Stroom Alarm} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - Stroomuitval Alarm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Stroomuitval Alarm Value:0xc0297bb978} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - Stroomuitval Alarm Value:0xc0297bb988}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065841864s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Stroomuitval Alarm} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - Stroomuitval Alarm} value=0 ]} {Instance:metric=Kast 20 - D2 - LK2 4 P2/16 - geïnitialiseerd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - geïnitialiseerd Value:0xc0297bb9c8} B:{Var:B Labels:metric=Kast 20 - D2 - LK2 4 P2/16 - geïnitialiseerd Value:0xc0297bb9b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.065849964s EvaluationString:[ var='A' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - geïnitialiseerd} value=0 ], [ var='B' labels={metric=Kast 20 - D2 - LK2 4 P2/16 - geïnitialiseerd} value=0 ]}]" duration=26.888361ms + level=debug ts=2024-05-29T13:44:14.066063149Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.065992816Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.065310534Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.06516775Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3d9ke7i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06503342Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=849729 slug=medopsimscare t=2024-05-29T13:44:14.06503038Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3d9ke7i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064920688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=849729 slug=medopsimscare version=5 fingerprint=2145dbd0a66af23a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.064912024Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.064558809s EvaluationString:}]" duration=21.519122ms + level=debug ts=2024-05-29T13:44:14.064682667Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3bjfolg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064822767Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.06469523Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.064679236Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3bjfolg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064784157Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q3bjfolg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064757377Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q37y62k0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064635526Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:14.064631217Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.274165ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q37y62k0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064551915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q37y62k0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064519564Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.064561511Z caller=remote_instance_store.go:51 user=402122 slug=leapwallet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q36nuhsq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064474884Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q36nuhsq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064403603Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=402122 slug=leapwallet version=45 fingerprint=31d162cd2835d333 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.064323837Z level=debug msg="Alert rule evaluated" results="[{Instance: 
State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.06398058s EvaluationString:}]" duration=13.960712ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q36lmac3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.064153141Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.06408088Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q36lmac3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06412161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q31dtc20-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06406994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q31dtc20-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063960519Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q2p8zlto-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063861298Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview t=2024-05-29T13:44:14.063801763Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.063808142Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q2p8zlto-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063753497Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q2f8uul3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063672416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q2f8uul3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063599385Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=129076 slug=marginalunit t=2024-05-29T13:44:14.063569083Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:14.063575736Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=129076 slug=marginalunit version=1 fingerprint=06d5a40ffcc8550e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.063424871Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.063075275s EvaluationString:}]" duration=59.161672ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q2acgp49-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063399423Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q2acgp49-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063292772Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.063274957Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q2acgp49-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063237181Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.063235392Z 
caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.063177921Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.063181223Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q21rfpnk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06307888Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q21rfpnk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.063031129Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q202cb27-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062998839Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q202cb27-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062949018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q202cb27-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062933948Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.0628883Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q202cb27-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062898888Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1xyiz67-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062846457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1xyiz67-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062825527Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1xyiz67-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062717426Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.062691676Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=87052 slug=polystream t=2024-05-29T13:44:14.062746344Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.797668ms + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:14.06265744Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GBSK-PRD, Consumer Group=rapl-prd, Topic=gbs-outbound-sportsbet" t=2024-05-29T13:44:14.062652277Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GBSK-PRD, Consumer Group=rapl-prd, Topic=gbs-outbound-pokerstars" t=2024-05-29T13:44:14.06262656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:14.062616276Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GBSK-PRD, Consumer Group=rapl-prd, Topic=gbs-outbound-paddypower" t=2024-05-29T13:44:14.062569752Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=82372 slug=fout instance="datasource_uid=000000001, ref_id=A" t=2024-05-29T13:44:14.062559872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1nk91ke-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.062522214Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=82372 slug=fout instance="datasource_uid=000000001, ref_id=A" t=2024-05-29T13:44:14.062537063Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:14.06247105Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GBSK-PRD, Consumer Group=rapl-prd, Topic=gbs-outbound-paddypower" t=2024-05-29T13:44:14.062557938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1nk91ke-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062487504Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.062407319Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1n54zfo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062423203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1n54zfo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062398703Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.062288837Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GBSK-PRD, Consumer Group=rapl-prd, Topic=gbs-outbound-betfairus" t=2024-05-29T13:44:14.062417994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1n54zfo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062365832Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GBSK-PRD, Consumer Group=rapl-prd, Topic=gbs-outbound-betfairus" t=2024-05-29T13:44:14.062407683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance="Cluster Name=GBSK-PRD, 
Consumer Group=rapl-prd, Topic=gbs-outbound-arkle" t=2024-05-29T13:44:14.062347203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:14.062361896Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:14.062325317Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1n54zfo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062268981Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1mcy7cm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062198611Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=182434 slug=form t=2024-05-29T13:44:14.062103712Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=182434 slug=form instance= t=2024-05-29T13:44:14.062074623Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1mcy7cm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062093349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1k6na9i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.062025189Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1k0j05y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061687955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-q1fubwzb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061616755Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1fubwzb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061573694Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1fubwzb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061500783Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1fubwzb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061473203Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.061371084Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1fn4ti9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061431453Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1fn4ti9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061403652Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.061358028Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1fn4ti9-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061358842Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.061353028Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.061326627Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.061248825Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.061233125Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.061182023Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=70430 slug=dapperlabs t=2024-05-29T13:44:14.061128787Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q1ck9mie-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.061081709Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q19xkqzc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.060965338Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:14.060876689Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:14.060855991Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q155qokd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.060783456Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=173730 slug=nikon version=4 fingerprint=10d91d4c3da40650 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.060793266Z level=debug 
msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.06051725s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=122.989192ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q155qokd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.060695625Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q151zppi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.060600594Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=788187 slug=quantumlyconfused instance= t=2024-05-29T13:44:14.060516257Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q151zppi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.060536604Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.060459082Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=788187 slug=quantumlyconfused t=2024-05-29T13:44:14.060455967Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q11yl0sg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06021309Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q11yl0sg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.06018295Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q11yl0sg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.060140949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q10ag41t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.060028558Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.05999102Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.059939123Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q10ag41t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059921477Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.059928775Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q10ag41t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059895237Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.059865634Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0y8hph4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059824476Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0y8hph4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059680095Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:14.059592281Z 
level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0u21zqp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059450562Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.059351097Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0r0i3qe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059325671Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=159532 slug=getfabric t=2024-05-29T13:44:14.059341733Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.888312ms
+level=debug ts=2024-05-29T13:44:14.059341406Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0awtu8g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059129549Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0awtu8g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059092429Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0awtu8g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059064138Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.0591676Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q0awtu8g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.059026868Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.059046578Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q08efk7y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.058947337Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q08efk7y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.058875287Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q08efk7y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.058799506Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.058964076Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q05obusg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.058670044Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-q05obusg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.058594374Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.058863205Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pzsqxkgi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.058189739Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:14.058608554Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.058767954Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=806229 slug=simplisafe instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.058574443Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:14.058553122Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:14.058655755Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.058572379Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.058572979Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.058346146Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.058268554Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.05810649Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.058035916Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pznvdbrr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.057888356Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pznvdbrr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.057820486Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.057552614Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.057706141Z caller=remote_instance_store.go:51 user=152655 slug=orbweaver msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pzidj5si-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.057679344Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.057512572Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pzhf08he-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.057463352Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pzgih39m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.05729891Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:14.057343644Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=37.204934ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pza5it9z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.057115298Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pza5it9z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056933577Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pz84x6ki-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056744835Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pz84x6ki-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056717524Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pz5d0dib-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056660334Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pz5d0dib-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056603273Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pz5d0dib-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056529802Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pz155ih9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056393661Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.056764901Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.056748354Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.056625973Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.056442237Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.056262813Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.056271566Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyvwbji9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.05624565Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyvwbji9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056231199Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyvwbji9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.056121518Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.056009315Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pysbubk5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.055925946Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:14.055932858Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.847687ms
+logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:14.055849585Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pykd72ar-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.055754475Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyg4m7rt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.055563933Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyg4m7rt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.055533312Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.055512962Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyg4m7rt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.055487792Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=475799 slug=dpdcz t=2024-05-29T13:44:14.05546632Z level=debug msg="Saving alert states" count=34 max_state_save_concurrency=1
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=qe-resending-notification-backfill" t=2024-05-29T13:44:14.055455277Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyewhcno-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.05532211Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-customer-addresses-backfill" t=2024-05-29T13:44:14.055312506Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.055306084Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-courier-tours-backfill" t=2024-05-29T13:44:14.055292438Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-courier-tours-backfill" t=2024-05-29T13:44:14.055285551Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-workdays-backfill" t=2024-05-29T13:44:14.055272161Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=464973 slug=equansdatahub t=2024-05-29T13:44:14.055245151Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=464973 slug=equansdatahub instance= t=2024-05-29T13:44:14.055232968Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.055215465Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=464973 slug=equansdatahub version=9 fingerprint=41c8c71970a7fe01 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.055119532Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.054895321s EvaluationString:}]" duration=110.375465ms
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-stable-collection-block-backfill" t=2024-05-29T13:44:14.055194351Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-routes-backfill" t=2024-05-29T13:44:14.05516677Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-labels-backfill" t=2024-05-29T13:44:14.055084322Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyeiefn5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.055139908Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.055086168Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-customer-contact-relations-backfill" t=2024-05-29T13:44:14.055045911Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-customer-contact-relations-backfill" t=2024-05-29T13:44:14.055040863Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-collection-ps-relations-backfill" t=2024-05-29T13:44:14.05502765Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.055027397Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-collection-ps-relations-backfill" t=2024-05-29T13:44:14.055020441Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-cache-addresses-backfill" t=2024-05-29T13:44:14.055004588Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyeiefn5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.055039307Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyegqibi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054932786Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=9947c5e8d245121c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.054919185Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.054690805s EvaluationString:}]" duration=298.553558ms
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.05486711Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.460055ms
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-stable-collection-blocks" t=2024-05-29T13:44:14.05490378Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-parcel-events" t=2024-05-29T13:44:14.054876115Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-long-term-redirections" t=2024-05-29T13:44:14.054855941Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.054846458Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pyegqibi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054847565Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-collections" t=2024-05-29T13:44:14.054784424Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-ingest-collection-labels" t=2024-05-29T13:44:14.054732544Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-work-days-backfill" t=2024-05-29T13:44:14.054700344Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pydn73so-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054676663Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pydn73so-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054617033Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-customers-backfill" t=2024-05-29T13:44:14.054616883Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pydn73so-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054585273Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-customers-backfill" t=2024-05-29T13:44:14.054606832Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=fido-dx-customer-addresses-backfill" t=2024-05-29T13:44:14.054575497Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.054411954Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-py731u2k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.0543781Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-py731u2k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.05433931Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.054251043Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxy1jd2b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054228879Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxy1jd2b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054183458Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxy1jd2b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.054113998Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.053880911Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.053854933Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxtcqnnv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.053682253Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxtcqnnv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.053654253Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxsdu9jb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.053449581Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxsdu9jb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.0533449Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:14.053282524Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.053265561Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:14.053258996Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxpyi3lx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.053079887Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxpyi3lx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.053063007Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxpyi3lx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052976166Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=530405 slug=zetetic instance="chain=Kusama, pool=Uno 1" t=2024-05-29T13:44:14.052968036Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxoz4if1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052869455Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxoz4if1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052842405Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxoz4if1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052772234Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxkppnwl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052705553Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=sAAhZ0a7z, ref_id=D" t=2024-05-29T13:44:14.052699927Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxkppnwl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052644903Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=sAAhZ0a7z, ref_id=D" t=2024-05-29T13:44:14.052579117Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=sAAhZ0a7z, ref_id=D" t=2024-05-29T13:44:14.05253893Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxiwd7to-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052497141Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.052407744Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.052445169Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxiwd7to-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.05241532Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.052407769Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxivq13a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052239948Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxivq13a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052163388Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pxivq13a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.052129137Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.05202391Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.051680533Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-px8kww2a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.051307559Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.05124985Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-px8kww2a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.051209658Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.051177027Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:14.05103022Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwxsz9ou-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050986336Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=873368 slug=euid instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:14.050964501Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=873368 slug=euid instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:14.050956649Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.scheduler user=49546 slug=nulogyinfra version=2 fingerprint=0fa809d605dac665 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.050951725Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.050685485s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=65.59721ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwxsz9ou-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050947805Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=873368 slug=euid version=42 fingerprint=5893f56682a6c41f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.050845989Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=QUERY State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.050570417s EvaluationString:}]" duration=8.516965ms
+level=debug ts=2024-05-29T13:44:14.050654737Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwduer7e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050550681Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.050552372Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.050579288Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.050487986Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwduer7e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050492151Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=698963 slug=lemonade version=3 fingerprint=33989b7fe81779d0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.050418173Z level=debug msg="Alert rule evaluated" results="[{Instance:app=ds-label-studio-listener, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=ds-label-studio-listener, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=ds-label-studio-listener, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=ds-label-studio-listener, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production Value:0xc01665dc60} THRESHOLD:{Var:THRESHOLD Labels:app=ds-label-studio-listener, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=ds-label-studio-listener, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production Value:0xc01665dd30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.050083161s EvaluationString:[ var='QUERY' labels={app=ds-label-studio-listener, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=ds-label-studio-listener, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production} value=0 ], [ var='THRESHOLD' labels={app=ds-label-studio-listener, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=ds-label-studio-listener, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production} value=0 ]}]" duration=62.708782ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwcvo13z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050386499Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwcvo13z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050320209Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwb9zf2l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050231688Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pwb9zf2l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050173787Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pw9qhgoq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.050060676Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pw9qhgoq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.049979235Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.049843615Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.049727322Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:14.049713292Z level=debug msg="Setting next state" handler=resultNoData
+level=info ts=2024-05-29T13:44:14.049587201Z caller=remote_alert_sender.go:94 user=807171 slug=unstarnp host=unstarnp-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.16.109.137:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ec2f46b2-7267-4503-94bb-4820c22d4743 alerts=1
+level=debug ts=2024-05-29T13:44:14.049602689Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pvsktm53-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04945008Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.049413911Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.049342951Z caller=remote_instance_store.go:51 user=766364 slug=asdg msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pvp0wg9a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.049311448Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.04927739Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=766364 slug=asdg t=2024-05-29T13:44:14.04930864Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=766364 slug=asdg instance="instance=https://agdata.go.ke/asdg/api, job=blackbox-asdg-api" t=2024-05-29T13:44:14.049289079Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pvmk3aed-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.049027425Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pvmk3aed-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.049013405Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.048741236Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.048706972Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=465668 slug=xpressinfra instance="datasource_uid=b3546312-64d1-414d-809a-5c608fb2f54f, ref_id=C,D" t=2024-05-29T13:44:14.048608123Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pvjo0mrn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.048641371Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.048642972Z caller=remote_instance_store.go:51 user=465668 slug=xpressinfra msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.048539203Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pvjo0mrn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04853557Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pvjo0mrn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04850856Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pv8ieeno-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.048439599Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.048337932Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pv8ieeno-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.048301448Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.048272849Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pv5ewhgw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.048260697Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pv5ewhgw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.048118936Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=148654 slug=tinybeans t=2024-05-29T13:44:14.047868287Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.799651ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puxlc2wl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047821153Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puxlc2wl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047701322Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puw7c2hr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047667441Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puw7c2hr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047656801Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.047651522Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.04766431Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.047576081Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.04753493Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puw7c2hr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.0475492Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.047443375Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=656284 slug=cencosudx version=3 fingerprint=5b6b6566668c75a9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.047418439Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.047007036s EvaluationString:}]" duration=9.151377ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-purfd09j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047446639Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.047412123Z caller=remote_instance_store.go:51 user=856040 slug=kuady msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-purfd09j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047337478Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pum1y9ou-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047272267Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:14.047331301Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=256
+logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:14.047341151Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:14.047336011Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pum1y9ou-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047206646Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=856040 slug=kuady t=2024-05-29T13:44:14.047300471Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pum1y9ou-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047162266Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pul2gbb4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047126636Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pul2gbb4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.047071205Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pul2gbb4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.046998134Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pukjeoew-termination-metadata-pv, phase=Released,
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.046936314Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pukjeoew-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.046863513Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puca00pf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04653592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puca00pf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.046521209Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.047058317Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.046948423Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-puca00pf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.046474459Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pu9eeetc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.046373968Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.046905615Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=87052 slug=polystream instance= t=2024-05-29T13:44:14.046878126Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:14.046845182Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.046814887Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.046810338Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.046632031Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.046722527Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.625021ms + level=debug ts=2024-05-29T13:44:14.046550759Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.046589576Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.046418164Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="datasource_uid=grafanacloud-prom, ref_id=jobs_p95" t=2024-05-29T13:44:14.046501706Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=zammad, service=zammad-redis-master" t=2024-05-29T13:44:14.046460808Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=zammad, service=zammad-memcached" t=2024-05-29T13:44:14.046299962Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.046169321Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.046156735Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptryu0hf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.046124395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptqmm0aj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045987064Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptqmm0aj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045915173Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptqmm0aj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045881753Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptcs7p16-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045727801Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptcs7p16-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045684961Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptcs7p16-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04563222Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=zammad, service=zammad" t=2024-05-29T13:44:14.045935367Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptcs7p16-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045548519Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptbypoyt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045417568Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ptbypoyt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045322057Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptbv716e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045147945Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptbv716e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.045117905Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptakmmla-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.044916573Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.045710136Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptakmmla-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.044766981Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ptaj5lgk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.044484238Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.045675529Z caller=remote_instance_store.go:51 user=686395 slug=containerfoundation msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pt9p3p8p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.044420848Z 
level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.045474157Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=redis-cluster, service=redis-cluster" t=2024-05-29T13:44:14.045424117Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=845543 slug=deliveryhero t=2024-05-29T13:44:14.045377513Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=457710302499, dimension_QueueName=talabat-prod-salus-queue-sonarcloud, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-salus-queue-sonarcloud, region=eu-west-2" t=2024-05-29T13:44:14.045353841Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=845543 slug=deliveryhero instance="account_id=457710302499, dimension_QueueName=talabat-prod-salus-queue-github, name=arn:aws:sqs:eu-west-2:457710302499:talabat-prod-salus-queue-github, region=eu-west-2" t=2024-05-29T13:44:14.045324601Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.045290579Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=373 + level=debug ts=2024-05-29T13:44:14.045238401Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.045004647Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=321534 slug=atlasiq t=2024-05-29T13:44:14.044990065Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:14.044905096Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.044939844Z caller=remote_instance_store.go:51 user=68499 slug=identt msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.044895542Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=pulsar-cluster, service=pulsar-cluster-recovery" t=2024-05-29T13:44:14.044890224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=68499 slug=identt instance= t=2024-05-29T13:44:14.044878303Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.044736822Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=pulsar-cluster, service=pulsar-cluster-pulsar-manager" t=2024-05-29T13:44:14.044683416Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:14.044592934Z caller=remote_image_capturer.go:61 user=228733 slug=csmoney rule_org_id=1 rule_uid=fc3674ed-eb7f-437b-9715-c523a8adbac8 dashboard=ba5ad42a-2495-4f47-99a1-26e05d601796 panel=11 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=538037 slug=drivewealth instance= t=2024-05-29T13:44:14.044399107Z level=debug msg="Keeping state" 
state=Normal + level=debug ts=2024-05-29T13:44:14.044304922Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance= t=2024-05-29T13:44:14.044386508Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:14.04432927Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=538037 slug=drivewealth version=14 fingerprint=de292a29f4b49205 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.044206569Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc019a58370} D:{Var:D Labels: Value:0xc019a58378} E:{Var:E Labels: Value:0xc019a58950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.043455518s EvaluationString:[ var='C' labels={} value=42.812698375242576 ], [ var='D' labels={} value=0 ], [ var='E' labels={} value=0 ]}]" duration=46.604889ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pt9p3p8p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.044314177Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pt5o3b8v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.044109455Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=payto-shared, service=payto-web-cuscal" t=2024-05-29T13:44:14.044064331Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psznd4cz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.043848072Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.04386565Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psznd4cz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.043814162Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psznd4cz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.043788461Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=payto-shared, service=payto-cuscal-api-simulate" t=2024-05-29T13:44:14.043750805Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psus8r37-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04367501Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=228733 slug=csmoney instance= t=2024-05-29T13:44:14.043614776Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=228733 slug=csmoney t=2024-05-29T13:44:14.043560015Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=url-shortener" t=2024-05-29T13:44:14.043641331Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=url-shortener" t=2024-05-29T13:44:14.043632478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=transactions-srv-core" t=2024-05-29T13:44:14.043513603Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.043461293Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=159532 slug=getfabric instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.043435026Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pskwvwu8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.043403137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pskwvwu8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.043355667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159532 slug=getfabric t=2024-05-29T13:44:14.043376611Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pskwvwu8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.043317597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=159532 slug=getfabric version=4 fingerprint=4d23a5c5f515991d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.043303273Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.042954529s EvaluationString:}]" duration=169.046172ms + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=pearl-docs-paypaplane" t=2024-05-29T13:44:14.043228845Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.043000719Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psjyi4zb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.043042054Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=pearl-docs-billycart" t=2024-05-29T13:44:14.043032365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psjyi4zb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.042861352Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=payto-srv-selector" t=2024-05-29T13:44:14.04253039Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.042447058Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.042487686Z caller=remote_alert_sender.go:94 user=87780 slug=zencloudandhosting host=zencloudandhosting-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.192.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bd4b45a1-4aab-4e4e-a3fc-bfc7a38a6b15 alerts=1 + logger=ngalert.state.manager user=391538 slug=risknarrative 
instance="dbinstance_identifier=syd-prod-veridas-db, env=au" t=2024-05-29T13:44:14.042491254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=108112 slug=btctrader t=2024-05-29T13:44:14.042436848Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-v4monitoring-db, env=au" t=2024-05-29T13:44:14.042364201Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psen865e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.042304486Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-sar-investigation-db, env=au" t=2024-05-29T13:44:14.042223197Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.042033267Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=payto-srv-core" t=2024-05-29T13:44:14.042175209Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=payto-srv-core" t=2024-05-29T13:44:14.042165462Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psd7as80-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.042090044Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=82372 slug=fout t=2024-05-29T13:44:14.042063697Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=148.348568ms + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=payto-cuscal-api-simulate" t=2024-05-29T13:44:14.042037052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-psd7as80-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.042057664Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:14.042039186Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=608555 slug=ias instance="Series=query9844c0a06fca46d79afd208764794848, 
TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da" t=2024-05-29T13:44:14.042000811Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.041926273Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=608555 slug=ias version=26 fingerprint=bfbca7b0ca220728 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.041905906Z level=debug msg="Alert rule evaluated" results="[{Instance:Series=query9844c0a06fca46d79afd208764794848, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Series=query9844c0a06fca46d79afd208764794848, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da Value:0xc09247cde0} C:{Var:C Labels:Series=query9844c0a06fca46d79afd208764794848, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da Value:0xc09247ce00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.040976563s EvaluationString:[ var='B' labels={Series=query9844c0a06fca46d79afd208764794848, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da} value=NaN ], [ var='C' labels={Series=query9844c0a06fca46d79afd208764794848, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da} value=0 ]}]" duration=33.329144ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps8rp8wk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041891062Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=payto-api-core" t=2024-05-29T13:44:14.041820005Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps8rp8wk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041776521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-instant-id-qa-db, env=au" t=2024-05-29T13:44:14.041714755Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.041748848Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:14.041676821Z level=debug msg="Saving alert states done" count=45 max_state_save_concurrency=1 duration=516.816939ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps5301gc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.04170709Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.041570197Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps5301gc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041617389Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps5301gc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041582659Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=paypaplane-appgateway-api" t=2024-05-29T13:44:14.041625751Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-idverse-enterprise-db, env=au" t=2024-05-29T13:44:14.041572955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=paypaplane-appgateway-api" t=2024-05-29T13:44:14.041610149Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=465816 slug=metricgamingqa t=2024-05-29T13:44:14.041498427Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=171.080304ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps4mv7bk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041475088Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-frontend-log-db, env=au" t=2024-05-29T13:44:14.041450846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-frontend-log-db, env=au" t=2024-05-29T13:44:14.041441259Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=paymods-srv-square" t=2024-05-29T13:44:14.04140784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=paymods-srv-square" t=2024-05-29T13:44:14.041389806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps2c37p5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041382787Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.041359025Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps2c37p5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041359406Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps2c37p5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041326956Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps2c37p5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041254125Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.041247827Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps2btckk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041200765Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=paymods-srv-securepay" t=2024-05-29T13:44:14.041178782Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.0411473Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps2btckk-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041117184Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-frontend-db, env=au" t=2024-05-29T13:44:14.041123588Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps2btckk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.041094144Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=paymods-srv-master" t=2024-05-29T13:44:14.041010747Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.040998189Z caller=remote_instance_store.go:51 user=807171 slug=unstarnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.040990985Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.041000254Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-dow-jones-db, env=au" t=2024-05-29T13:44:14.04098052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.040980145Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:14.040971547Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=807171 slug=unstarnp t=2024-05-29T13:44:14.040828218Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=901230 slug=integromonitor instance= t=2024-05-29T13:44:14.040962233Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps21bqcw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040944552Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps1ikyte-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040914462Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=4947 slug=mediamath instance= t=2024-05-29T13:44:14.040948892Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.040912439Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps1ikyte-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040892152Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.04084325Z caller=remote_rule_evaluator.go:193 user=235691 slug=om2 msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps1ikyte-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040852871Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ps1ikyte-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040801051Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-przcapag-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04074609Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-przcapag-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.04071272Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-creditsafe-db, env=au" t=2024-05-29T13:44:14.040674428Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pruumtt8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040572198Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-comply-advantage-db, env=au" t=2024-05-29T13:44:14.040516559Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pruumtt8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040455787Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prod01awsuswest2wssys-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040402737Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prod01awsuswest2wssys-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040380046Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-compliance-lens-db, env=au" t=2024-05-29T13:44:14.040315293Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prod01awsuswest2wssys-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040351196Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prod01awsuswest2wssys-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040290325Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prkse1o3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040259345Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prkse1o3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040191274Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prkse1o3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.040082503Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.040040832Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.040045112Z caller=remote_instance_store.go:51 user=310637 slug=notino msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=payconnector" t=2024-05-29T13:44:14.04003353Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.040001666Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.039958734Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prij5nkz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.039909982Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=metadata-cdc" t=2024-05-29T13:44:14.039895311Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-prij5nkz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.039884441Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.03976923Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-backend-db-read-replica-1, env=au" t=2024-05-29T13:44:14.039780369Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pr3pybma-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.039601868Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=merchants-srv-core" t=2024-05-29T13:44:14.039606286Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-backend-db, env=au" t=2024-05-29T13:44:14.039561277Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pr3pybma-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.039558438Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.039484228Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pr3pybma-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.039461587Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=kratos" t=2024-05-29T13:44:14.039453707Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=399183 slug=guidion t=2024-05-29T13:44:14.039419316Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-veridas-db, env=apac" t=2024-05-29T13:44:14.039382608Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=identity-srv-core" t=2024-05-29T13:44:14.039264273Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-v4monitoring-db, env=apac" t=2024-05-29T13:44:14.039167742Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pr3izf11-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.039071923Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.039122947Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=id" t=2024-05-29T13:44:14.03910024Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:14.039060133Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=698963 slug=lemonade instance="app=ds-label-studio-listener, cluster=lmnd-staging-us-east-1, container=kube-state-metrics, deployment=ds-label-studio-listener, endpoint=http, instance=10.24.74.71:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-t4kxx, region=us-east-1, service=kube-state-metrics, stage=staging" t=2024-05-29T13:44:14.039025901Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:14.039012151Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=373
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=gateway" t=2024-05-29T13:44:14.038929884Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.038914776Z caller=remote_instance_store.go:51 user=652086 slug=unihosted msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1084.unifi.unihosted.com" t=2024-05-29T13:44:14.038820975Z level=debug msg="Setting next state" handler=resultNormal
+level=info ts=2024-05-29T13:44:14.038687051Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=256
+level=debug ts=2024-05-29T13:44:14.038704073Z caller=remote_instance_store.go:51 user=824501 slug=bendingspoons msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=824501 slug=bendingspoons t=2024-05-29T13:44:14.038640235Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1082.unifi.unihosted.com" t=2024-05-29T13:44:14.038653774Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pqu1lcs8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.038601918Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.038625923Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1081.unifi.unihosted.com" t=2024-05-29T13:44:14.038576874Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1081.unifi.unihosted.com" t=2024-05-29T13:44:14.038560974Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.038580047Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=824501 slug=bendingspoons instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.038564916Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=external-web-agreements" t=2024-05-29T13:44:14.038493977Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-periodic-reviews-db, env=apac" t=2024-05-29T13:44:14.038481569Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.038401405Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1078.unifi.unihosted.com" t=2024-05-29T13:44:14.038378673Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.038377291Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.038337399Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=external-api-paymods" t=2024-05-29T13:44:14.038282442Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1076.unifi.unihosted.com" t=2024-05-29T13:44:14.038314972Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pqlc2xfb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.038248625Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.038212936Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1075.unifi.unihosted.com" t=2024-05-29T13:44:14.038238872Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pqilcut9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.038069833Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-idverse-enterprise-db, env=apac" t=2024-05-29T13:44:14.038050407Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-idverse-enterprise-db, env=apac" t=2024-05-29T13:44:14.038022783Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=external-api-merchants" t=2024-05-29T13:44:14.038064709Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.037978289Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pqilcut9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.037971592Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pqihmxyw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.037900991Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1070.unifi.unihosted.com" t=2024-05-29T13:44:14.03790887Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pqfgfqag-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.037675039Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=error" t=2024-05-29T13:44:14.037782748Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1068.unifi.unihosted.com" t=2024-05-29T13:44:14.037749469Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1068.unifi.unihosted.com" t=2024-05-29T13:44:14.037734669Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:14.037637479Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.148472ms
+logger=ngalert.state.manager.persist user=828988 slug=kdc t=2024-05-29T13:44:14.037691512Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=828988 slug=kdc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.037669096Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.037675221Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=828988 slug=kdc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.037655146Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:14.037626326Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=828988 slug=kdc version=13 fingerprint=3fcfea6c661950f5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.037545122Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.037201915s EvaluationString:}]" duration=10.27853ms
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-frontend-db-read-replica-1, env=apac" t=2024-05-29T13:44:14.037553132Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1066.unifi.unihosted.com" t=2024-05-29T13:44:14.037567468Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1065.unifi.unihosted.com" t=2024-05-29T13:44:14.037484467Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.037462729Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=driver-background" t=2024-05-29T13:44:14.037433437Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pq25jkot-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.037349865Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1060.unifi.unihosted.com" t=2024-05-29T13:44:14.037270066Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=driver" t=2024-05-29T13:44:14.037269284Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pq25jkot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.037239974Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1059.unifi.unihosted.com" t=2024-05-29T13:44:14.037206165Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pq162w6p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.037187614Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:14.037211634Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:14.037196966Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.037146198Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.037044357Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1058.unifi.unihosted.com" t=2024-05-29T13:44:14.037121965Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=337951 slug=pawapay version=8 fingerprint=b3a0f1eea93d0576 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.037074202Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.036745306s EvaluationString:}]" duration=10.85442ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pppgv4z2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.037023302Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.037060786Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.037034506Z caller=remote_instance_store.go:51 user=283914 slug=emmasleep msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pppgv4z2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036966961Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.036933731Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.03694572Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-stock-receiver, dimension_QueueName=ecom-stage-oms-stock-receiver-tracking-number-updates-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-stock-receiver-tracking-number-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.036912514Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.036870214Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.361983ms
+level=debug ts=2024-05-29T13:44:14.036904938Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ppgd4qdu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.03686123Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.0368319Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.036826565Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=core-srv-email" t=2024-05-29T13:44:14.036807533Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=527202 slug=lnrsusinsurancedev version=25 fingerprint=62475a2ab8e79aaa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.03674065Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.036345658s EvaluationString:}]" duration=28.418864ms
+level=debug ts=2024-05-29T13:44:14.036749802Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.036785602Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:14.036715933Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ppgd4qdu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036710929Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.036666068Z caller=remote_instance_store.go:51 user=686395 slug=containerfoundation msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ppdv9fow-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036651678Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.036686053Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1046.unifi.unihosted.com" t=2024-05-29T13:44:14.036639562Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-data-management-db, env=apac" t=2024-05-29T13:44:14.036573895Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1046.unifi.unihosted.com" t=2024-05-29T13:44:14.036624662Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ppdv9fow-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036589818Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1045.unifi.unihosted.com" t=2024-05-29T13:44:14.036548462Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ppdv9fow-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036513877Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.036540956Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.036502519Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1044.unifi.unihosted.com" t=2024-05-29T13:44:14.036492061Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=cdc-cloudevents-srv-outbox" t=2024-05-29T13:44:14.036454133Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-creditsafe-db, env=apac" t=2024-05-29T13:44:14.03632293Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1033.unifi.unihosted.com" t=2024-05-29T13:44:14.03624516Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.036109631Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pp9pznxs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036183443Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=auth-api-pearl" t=2024-05-29T13:44:14.036205788Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.036155836Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pp9pznxs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036159263Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=auth-api-pearl" t=2024-05-29T13:44:14.036196219Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pp9pznxs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.036119563Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1028.unifi.unihosted.com" t=2024-05-29T13:44:14.036087759Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=agreements-srv-postbacks" t=2024-05-29T13:44:14.036067553Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.03608195Z caller=remote_image_capturer.go:54 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-comply-advantage-db, env=apac" t=2024-05-29T13:44:14.036050338Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=agreements-srv-postbacks" t=2024-05-29T13:44:14.036056329Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-stock-receiver, dimension_QueueName=ecom-stage-oms-stock-receiver-fulfilment-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-stock-receiver-fulfilment-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.035795007Z level=warn msg="Failed to take an image" dashboard=CNR8LzU7z2323213wrrwewr panel=16 error="rpc error: code = Code(422) desc = screenshots unavailable"
+level=debug ts=2024-05-29T13:44:14.035905655Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=paypaplane, service=agreements-srv-core" t=2024-05-29T13:44:14.035902147Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:14.035764681Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1015.unifi.unihosted.com" t=2024-05-29T13:44:14.035807057Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-poxfve3k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035782459Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-poxfve3k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035755319Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=network, service=egress" t=2024-05-29T13:44:14.035776341Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.035632359Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.035602516Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.513415ms
+logger=ngalert.state.manager user=652086 slug=unihosted instance="instance=m1011.unifi.unihosted.com" t=2024-05-29T13:44:14.035611656Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=monitoring, service=prometheus-prometheus-kube-prometheus-prometheus" t=2024-05-29T13:44:14.035523917Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.035503372Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:14.035483672Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=652086 slug=unihosted version=37 fingerprint=a21c2096ce36191e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.034764851Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=m1000.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1000.unifi.unihosted.com Value:0xc005c39c50} B:{Var:B Labels:instance=m1000.unifi.unihosted.com Value:0xc005c39c60} C:{Var:C Labels:instance=m1000.unifi.unihosted.com Value:0xc005c39c80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.031992419s EvaluationString:[ var='A' labels={instance=m1000.unifi.unihosted.com} value=5 ], [ var='B' labels={instance=m1000.unifi.unihosted.com} value=5 ], [ var='C' labels={instance=m1000.unifi.unihosted.com} value=0 ]} {Instance:instance=m1001.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1001.unifi.unihosted.com Value:0xc03d1fa040} B:{Var:B Labels:instance=m1001.unifi.unihosted.com Value:0xc04822b770} C:{Var:C Labels:instance=m1001.unifi.unihosted.com Value:0xc03d1fa010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032015919s EvaluationString:[ var='A' labels={instance=m1001.unifi.unihosted.com} value=30 ], [ var='B' labels={instance=m1001.unifi.unihosted.com} value=30 ], [ var='C' labels={instance=m1001.unifi.unihosted.com} value=0 ]} {Instance:instance=m1011.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1011.unifi.unihosted.com Value:0xc03d1fa060} B:{Var:B Labels:instance=m1011.unifi.unihosted.com Value:0xc03d1fa080} C:{Var:C Labels:instance=m1011.unifi.unihosted.com Value:0xc03d1fa0b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032024319s EvaluationString:[ var='A' labels={instance=m1011.unifi.unihosted.com} value=2 ], [ var='B' labels={instance=m1011.unifi.unihosted.com} value=2 ], [ var='C' labels={instance=m1011.unifi.unihosted.com} value=0 ]} {Instance:instance=m1014.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1014.unifi.unihosted.com Value:0xc03d1fa0e0} B:{Var:B Labels:instance=m1014.unifi.unihosted.com Value:0xc03d1fa0f0} C:{Var:C Labels:instance=m1014.unifi.unihosted.com Value:0xc03d1fa0d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032031019s EvaluationString:[ var='A' labels={instance=m1014.unifi.unihosted.com} value=0 ], [ var='B' labels={instance=m1014.unifi.unihosted.com} value=0 ], [ var='C' labels={instance=m1014.unifi.unihosted.com} value=0 ]} {Instance:instance=m1015.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1015.unifi.unihosted.com Value:0xc03d1fa150} B:{Var:B Labels:instance=m1015.unifi.unihosted.com Value:0xc03d1fa110} C:{Var:C Labels:instance=m1015.unifi.unihosted.com Value:0xc03d1fa130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032038519s EvaluationString:[ var='A' labels={instance=m1015.unifi.unihosted.com} value=10 ], [ var='B' labels={instance=m1015.unifi.unihosted.com} value=10 ], [ var='C' labels={instance=m1015.unifi.unihosted.com} value=0 ]} {Instance:instance=m1025.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1025.unifi.unihosted.com Value:0xc03d1fa170} B:{Var:B Labels:instance=m1025.unifi.unihosted.com Value:0xc03d1fa180} C:{Var:C Labels:instance=m1025.unifi.unihosted.com Value:0xc03d1fa1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032045619s EvaluationString:[ var='A' labels={instance=m1025.unifi.unihosted.com} value=11 ], [ var='B' labels={instance=m1025.unifi.unihosted.com} value=11 ], [ var='C' labels={instance=m1025.unifi.unihosted.com} value=0 ]} {Instance:instance=m1026.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1026.unifi.unihosted.com Value:0xc03d1fa1f0} B:{Var:B Labels:instance=m1026.unifi.unihosted.com Value:0xc03d1fa200} C:{Var:C Labels:instance=m1026.unifi.unihosted.com Value:0xc03d1fa230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032055218s EvaluationString:[ var='A' labels={instance=m1026.unifi.unihosted.com} value=8 ], [ var='B' labels={instance=m1026.unifi.unihosted.com} value=8 ], [ var='C' labels={instance=m1026.unifi.unihosted.com} value=0 ]} {Instance:instance=m1027.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1027.unifi.unihosted.com Value:0xc03d1fa280} B:{Var:B Labels:instance=m1027.unifi.unihosted.com Value:0xc03d1fa250} C:{Var:C Labels:instance=m1027.unifi.unihosted.com Value:0xc03d1fa270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032071318s EvaluationString:[ var='A' labels={instance=m1027.unifi.unihosted.com} value=15 ], [ var='B' labels={instance=m1027.unifi.unihosted.com} value=15 ], [ var='C' labels={instance=m1027.unifi.unihosted.com} value=0 ]} {Instance:instance=m1028.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1028.unifi.unihosted.com Value:0xc03d1fa2c0} B:{Var:B Labels:instance=m1028.unifi.unihosted.com Value:0xc03d1fa2d0} C:{Var:C Labels:instance=m1028.unifi.unihosted.com Value:0xc03d1fa2e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032077518s EvaluationString:[ var='A' labels={instance=m1028.unifi.unihosted.com} value=25 ], [ var='B' labels={instance=m1028.unifi.unihosted.com} value=25 ], [ var='C' labels={instance=m1028.unifi.unihosted.com} value=0 ]} {Instance:instance=m1030.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1030.unifi.unihosted.com Value:0xc03d1fa350} B:{Var:B Labels:instance=m1030.unifi.unihosted.com Value:0xc03d1fa310} C:{Var:C Labels:instance=m1030.unifi.unihosted.com Value:0xc03d1fa330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032086418s EvaluationString:[ var='A' labels={instance=m1030.unifi.unihosted.com} value=10 ], [ var='B' labels={instance=m1030.unifi.unihosted.com} value=10 ], [ var='C' labels={instance=m1030.unifi.unihosted.com} value=0 ]} {Instance:instance=m1033.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1033.unifi.unihosted.com Value:0xc03d1fa380} B:{Var:B Labels:instance=m1033.unifi.unihosted.com Value:0xc03d1fa3c0} C:{Var:C Labels:instance=m1033.unifi.unihosted.com Value:0xc03d1fa3d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032099318s EvaluationString:[ var='A' labels={instance=m1033.unifi.unihosted.com} value=9 ], [ var='B' labels={instance=m1033.unifi.unihosted.com} value=9 ], [ var='C' labels={instance=m1033.unifi.unihosted.com} value=0 ]} {Instance:instance=m1038.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1038.unifi.unihosted.com Value:0xc03d1fa410} B:{Var:B Labels:instance=m1038.unifi.unihosted.com Value:0xc03d1fa3f0} C:{Var:C Labels:instance=m1038.unifi.unihosted.com Value:0xc03d1fa400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032108318s EvaluationString:[ var='A' labels={instance=m1038.unifi.unihosted.com} value=23 ], [ var='B' labels={instance=m1038.unifi.unihosted.com} value=23 ], [ var='C' labels={instance=m1038.unifi.unihosted.com} value=0 ]} {Instance:instance=m1039.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1039.unifi.unihosted.com Value:0xc03d1fa440} B:{Var:B Labels:instance=m1039.unifi.unihosted.com Value:0xc03d1fa450} C:{Var:C Labels:instance=m1039.unifi.unihosted.com Value:0xc03d1fa480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032116218s EvaluationString:[ var='A' labels={instance=m1039.unifi.unihosted.com} value=5 ], [ var='B' labels={instance=m1039.unifi.unihosted.com} value=5 ], [ var='C' labels={instance=m1039.unifi.unihosted.com} value=0 ]} {Instance:instance=m1044.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1044.unifi.unihosted.com Value:0xc03d1fa4e0} B:{Var:B Labels:instance=m1044.unifi.unihosted.com Value:0xc03d1fa4f0} C:{Var:C Labels:instance=m1044.unifi.unihosted.com Value:0xc03d1fa4b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032121518s EvaluationString:[ var='A' labels={instance=m1044.unifi.unihosted.com} value=3 ], [ var='B' labels={instance=m1044.unifi.unihosted.com} value=3 ], [ var='C' labels={instance=m1044.unifi.unihosted.com} value=0 ]} {Instance:instance=m1045.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1045.unifi.unihosted.com Value:0xc03d1fa560} B:{Var:B Labels:instance=m1045.unifi.unihosted.com Value:0xc03d1fa510} C:{Var:C Labels:instance=m1045.unifi.unihosted.com Value:0xc03d1fa520}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032127118s EvaluationString:[ var='A' labels={instance=m1045.unifi.unihosted.com} value=5 ], [ var='B' labels={instance=m1045.unifi.unihosted.com} value=5 ], [ var='C' labels={instance=m1045.unifi.unihosted.com} value=0 ]} {Instance:instance=m1046.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1046.unifi.unihosted.com Value:0xc03d1fa580} B:{Var:B Labels:instance=m1046.unifi.unihosted.com Value:0xc03d1fa590} C:{Var:C Labels:instance=m1046.unifi.unihosted.com Value:0xc03d1fa5a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032133018s EvaluationString:[ var='A' labels={instance=m1046.unifi.unihosted.com} value=6 ], [ var='B' labels={instance=m1046.unifi.unihosted.com} value=6 ], [ var='C' labels={instance=m1046.unifi.unihosted.com} value=0 ]} {Instance:instance=m1047.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1047.unifi.unihosted.com Value:0xc03d1fa5d0} B:{Var:B Labels:instance=m1047.unifi.unihosted.com Value:0xc03d1fa730} C:{Var:C Labels:instance=m1047.unifi.unihosted.com Value:0xc03d1fa740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032142618s EvaluationString:[ var='A' labels={instance=m1047.unifi.unihosted.com} value=12 ], [ var='B' labels={instance=m1047.unifi.unihosted.com} value=12 ], [ var='C' labels={instance=m1047.unifi.unihosted.com} value=0 ]} {Instance:instance=m1051.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1051.unifi.unihosted.com Value:0xc03d1fa770} B:{Var:B Labels:instance=m1051.unifi.unihosted.com Value:0xc03d1fa780} C:{Var:C Labels:instance=m1051.unifi.unihosted.com Value:0xc03d1fa790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032152518s EvaluationString:[ var='A' labels={instance=m1051.unifi.unihosted.com} value=7 ], [ var='B' labels={instance=m1051.unifi.unihosted.com} value=7 ], [ var='C' labels={instance=m1051.unifi.unihosted.com} value=0 ]} {Instance:instance=m1054.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1054.unifi.unihosted.com Value:0xc03d1fa7d0} B:{Var:B Labels:instance=m1054.unifi.unihosted.com Value:0xc03d1fa7e0} C:{Var:C Labels:instance=m1054.unifi.unihosted.com Value:0xc03d1fa7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032164818s EvaluationString:[ var='A' labels={instance=m1054.unifi.unihosted.com} value=3 ], [ var='B' labels={instance=m1054.unifi.unihosted.com} value=3 ], [ var='C' labels={instance=m1054.unifi.unihosted.com} value=0 ]} {Instance:instance=m1055.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1055.unifi.unihosted.com Value:0xc03d1fa880} B:{Var:B Labels:instance=m1055.unifi.unihosted.com Value:0xc03d1fa860} C:{Var:C Labels:instance=m1055.unifi.unihosted.com Value:0xc03d1fa870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032176318s EvaluationString:[ var='A' labels={instance=m1055.unifi.unihosted.com} value=2 ], [ var='B' labels={instance=m1055.unifi.unihosted.com} value=2 ], [ var='C' labels={instance=m1055.unifi.unihosted.com} value=0 ]} {Instance:instance=m1057.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1057.unifi.unihosted.com Value:0xc03d1fa8d0} B:{Var:B Labels:instance=m1057.unifi.unihosted.com Value:0xc03d1fa8e0} C:{Var:C Labels:instance=m1057.unifi.unihosted.com Value:0xc03d1fa8c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032186218s EvaluationString:[ var='A' labels={instance=m1057.unifi.unihosted.com} value=4 ], [ var='B' labels={instance=m1057.unifi.unihosted.com} value=4 ], [ var='C' labels={instance=m1057.unifi.unihosted.com} value=0 ]} {Instance:instance=m1058.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1058.unifi.unihosted.com Value:0xc03d1fa900} B:{Var:B Labels:instance=m1058.unifi.unihosted.com Value:0xc03d1fa910} C:{Var:C Labels:instance=m1058.unifi.unihosted.com Value:0xc03d1fa950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032201818s EvaluationString:[ var='A' labels={instance=m1058.unifi.unihosted.com} value=0 ], [ var='B' labels={instance=m1058.unifi.unihosted.com} value=0 ], [ var='C' labels={instance=m1058.unifi.unihosted.com} value=0 ]} {Instance:instance=m1059.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1059.unifi.unihosted.com Value:0xc03d1fa970} B:{Var:B Labels:instance=m1059.unifi.unihosted.com Value:0xc03d1fa980} C:{Var:C Labels:instance=m1059.unifi.unihosted.com Value:0xc03d1fa9b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032207317s EvaluationString:[ var='A' labels={instance=m1059.unifi.unihosted.com} value=5 ], [ var='B' labels={instance=m1059.unifi.unihosted.com} value=5 ], [ var='C' labels={instance=m1059.unifi.unihosted.com} value=0 ]} {Instance:instance=m1060.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1060.unifi.unihosted.com Value:0xc03d1faa00} B:{Var:B Labels:instance=m1060.unifi.unihosted.com Value:0xc03d1faa20} C:{Var:C Labels:instance=m1060.unifi.unihosted.com Value:0xc03d1fa9f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032212717s EvaluationString:[ var='A' labels={instance=m1060.unifi.unihosted.com} value=12 ], [ var='B' labels={instance=m1060.unifi.unihosted.com} value=12 ], [ var='C' labels={instance=m1060.unifi.unihosted.com} value=0 ]} {Instance:instance=m1061.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1061.unifi.unihosted.com Value:0xc03d1faa60} B:{Var:B Labels:instance=m1061.unifi.unihosted.com Value:0xc03d1faa70} C:{Var:C Labels:instance=m1061.unifi.unihosted.com Value:0xc03d1faa90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032218017s EvaluationString:[ var='A' labels={instance=m1061.unifi.unihosted.com} value=1 ], [ var='B' labels={instance=m1061.unifi.unihosted.com} value=1 ], [ var='C' labels={instance=m1061.unifi.unihosted.com} value=0 ]} {Instance:instance=m1065.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1065.unifi.unihosted.com Value:0xc03d1faab0} B:{Var:B Labels:instance=m1065.unifi.unihosted.com Value:0xc03d1faac0} C:{Var:C Labels:instance=m1065.unifi.unihosted.com Value:0xc03d1faad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032225517s EvaluationString:[ var='A' labels={instance=m1065.unifi.unihosted.com} value=21 ], [ var='B' labels={instance=m1065.unifi.unihosted.com} value=21 ], [ var='C' labels={instance=m1065.unifi.unihosted.com} value=0 ]} {Instance:instance=m1066.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1066.unifi.unihosted.com Value:0xc03d1fab00} B:{Var:B Labels:instance=m1066.unifi.unihosted.com Value:0xc03d1fab10} C:{Var:C Labels:instance=m1066.unifi.unihosted.com Value:0xc03d1fab20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032234117s EvaluationString:[ var='A' labels={instance=m1066.unifi.unihosted.com} value=7 ], [ var='B' labels={instance=m1066.unifi.unihosted.com} value=7 ], [ var='C' labels={instance=m1066.unifi.unihosted.com} value=0 ]} {Instance:instance=m1067.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1067.unifi.unihosted.com Value:0xc03d1fab70} B:{Var:B Labels:instance=m1067.unifi.unihosted.com Value:0xc03d1fab80} C:{Var:C Labels:instance=m1067.unifi.unihosted.com Value:0xc03d1faba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032250217s EvaluationString:[ var='A' labels={instance=m1067.unifi.unihosted.com} value=15 ], [ var='B' labels={instance=m1067.unifi.unihosted.com} value=15 ], [ var='C' labels={instance=m1067.unifi.unihosted.com} value=0 ]} {Instance:instance=m1068.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1068.unifi.unihosted.com Value:0xc03d1fac60} B:{Var:B Labels:instance=m1068.unifi.unihosted.com Value:0xc03d1fabf0} C:{Var:C Labels:instance=m1068.unifi.unihosted.com Value:0xc03d1fac50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032257317s EvaluationString:[ var='A' labels={instance=m1068.unifi.unihosted.com} value=0 ], [ var='B' labels={instance=m1068.unifi.unihosted.com} value=0 ], [ var='C' labels={instance=m1068.unifi.unihosted.com} value=0 ]} {Instance:instance=m1069.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1069.unifi.unihosted.com Value:0xc03d1faca0} B:{Var:B Labels:instance=m1069.unifi.unihosted.com Value:0xc03d1facc0} C:{Var:C Labels:instance=m1069.unifi.unihosted.com Value:0xc03d1fac90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032265817s EvaluationString:[ var='A' labels={instance=m1069.unifi.unihosted.com} value=21 ], [ var='B' labels={instance=m1069.unifi.unihosted.com} value=21 ], [ var='C' labels={instance=m1069.unifi.unihosted.com} value=0 ]} {Instance:instance=m1070.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1070.unifi.unihosted.com Value:0xc03d1facf0} B:{Var:B Labels:instance=m1070.unifi.unihosted.com Value:0xc03d1fad00} C:{Var:C Labels:instance=m1070.unifi.unihosted.com Value:0xc03d1fad10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032275617s EvaluationString:[ var='A' labels={instance=m1070.unifi.unihosted.com} value=6 ], [ var='B' labels={instance=m1070.unifi.unihosted.com} value=6 ], [ var='C' labels={instance=m1070.unifi.unihosted.com} value=0 ]} {Instance:instance=m1071.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1071.unifi.unihosted.com Value:0xc03d1fae30} B:{Var:B Labels:instance=m1071.unifi.unihosted.com Value:0xc03d1fae70} C:{Var:C Labels:instance=m1071.unifi.unihosted.com Value:0xc03d1fae80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032288017s EvaluationString:[ var='A' labels={instance=m1071.unifi.unihosted.com} value=2 ], [ var='B' labels={instance=m1071.unifi.unihosted.com} value=2 ], [ var='C' labels={instance=m1071.unifi.unihosted.com} value=0 ]} {Instance:instance=m1072.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1072.unifi.unihosted.com Value:0xc03d1faed0} B:{Var:B Labels:instance=m1072.unifi.unihosted.com Value:0xc03d1faef0} C:{Var:C Labels:instance=m1072.unifi.unihosted.com Value:0xc03d1faec0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032293617s EvaluationString:[ var='A' labels={instance=m1072.unifi.unihosted.com} value=9 ], [ var='B' labels={instance=m1072.unifi.unihosted.com} value=9 ], [ var='C' labels={instance=m1072.unifi.unihosted.com} value=0 ]} {Instance:instance=m1073.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1073.unifi.unihosted.com Value:0xc03d1fb020} B:{Var:B Labels:instance=m1073.unifi.unihosted.com Value:0xc03d1fb030} C:{Var:C Labels:instance=m1073.unifi.unihosted.com Value:0xc03d1fb010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032299117s EvaluationString:[ var='A' labels={instance=m1073.unifi.unihosted.com} value=29 ], [ var='B' labels={instance=m1073.unifi.unihosted.com} value=29 ], [ var='C' labels={instance=m1073.unifi.unihosted.com} value=0 ]} {Instance:instance=m1075.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1075.unifi.unihosted.com Value:0xc03d1fb060} B:{Var:B Labels:instance=m1075.unifi.unihosted.com Value:0xc03d1fb0a0} C:{Var:C Labels:instance=m1075.unifi.unihosted.com Value:0xc03d1fb0c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032306017s EvaluationString:[ var='A' labels={instance=m1075.unifi.unihosted.com} value=2 ], [ var='B' labels={instance=m1075.unifi.unihosted.com} value=2 ], [ var='C' labels={instance=m1075.unifi.unihosted.com} value=0 ]} {Instance:instance=m1076.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1076.unifi.unihosted.com Value:0xc03d1fb0f0} B:{Var:B Labels:instance=m1076.unifi.unihosted.com Value:0xc03d1fb100} C:{Var:C Labels:instance=m1076.unifi.unihosted.com Value:0xc03d1fb0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032315417s EvaluationString:[ var='A' labels={instance=m1076.unifi.unihosted.com} value=2 ], [ var='B' labels={instance=m1076.unifi.unihosted.com} value=2 ], [ var='C' labels={instance=m1076.unifi.unihosted.com} value=0 ]} {Instance:instance=m1078.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1078.unifi.unihosted.com Value:0xc03d1fb130} B:{Var:B Labels:instance=m1078.unifi.unihosted.com Value:0xc03d1fb140} C:{Var:C Labels:instance=m1078.unifi.unihosted.com Value:0xc03d1fb180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032334217s EvaluationString:[ var='A' labels={instance=m1078.unifi.unihosted.com} value=3 ], [ var='B' labels={instance=m1078.unifi.unihosted.com} value=3 ], [ var='C' labels={instance=m1078.unifi.unihosted.com} value=0 ]} {Instance:instance=m1080.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1080.unifi.unihosted.com Value:0xc03d1fb1d0} B:{Var:B Labels:instance=m1080.unifi.unihosted.com Value:0xc03d1fb1a0} C:{Var:C Labels:instance=m1080.unifi.unihosted.com Value:0xc03d1fb1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032343817s EvaluationString:[ var='A' labels={instance=m1080.unifi.unihosted.com} value=0 ], [ var='B' labels={instance=m1080.unifi.unihosted.com} value=0 ], [ var='C' labels={instance=m1080.unifi.unihosted.com} value=0 ]} {Instance:instance=m1081.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1081.unifi.unihosted.com Value:0xc03d1fb220} B:{Var:B Labels:instance=m1081.unifi.unihosted.com Value:0xc03d1fb250} C:{Var:C Labels:instance=m1081.unifi.unihosted.com Value:0xc03d1fb210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032352417s EvaluationString:[ var='A' labels={instance=m1081.unifi.unihosted.com} value=27 ], [ var='B' labels={instance=m1081.unifi.unihosted.com} value=27 ], [ var='C' labels={instance=m1081.unifi.unihosted.com} value=0 ]} {Instance:instance=m1082.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1082.unifi.unihosted.com Value:0xc03d1fb370} B:{Var:B Labels:instance=m1082.unifi.unihosted.com Value:0xc03d1fb380} C:{Var:C Labels:instance=m1082.unifi.unihosted.com Value:0xc03d1fb360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032362217s EvaluationString:[ var='A' labels={instance=m1082.unifi.unihosted.com} value=0 ], [ var='B' labels={instance=m1082.unifi.unihosted.com} value=0 ], [ var='C' labels={instance=m1082.unifi.unihosted.com} value=0 ]} {Instance:instance=m1083.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1083.unifi.unihosted.com Value:0xc03d1fb3b0} B:{Var:B Labels:instance=m1083.unifi.unihosted.com Value:0xc03d1fb3c0} C:{Var:C Labels:instance=m1083.unifi.unihosted.com Value:0xc03d1fb3d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032379016s EvaluationString:[ var='A' labels={instance=m1083.unifi.unihosted.com} value=6 ], [ var='B' labels={instance=m1083.unifi.unihosted.com} value=6 ], [ var='C' labels={instance=m1083.unifi.unihosted.com} value=0 ]} {Instance:instance=m1084.unifi.unihosted.com State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=m1084.unifi.unihosted.com Value:0xc03d1fb420} B:{Var:B Labels:instance=m1084.unifi.unihosted.com Value:0xc03d1fb430} C:{Var:C Labels:instance=m1084.unifi.unihosted.com Value:0xc03d1fb510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.032384816s EvaluationString:[ var='A' labels={instance=m1084.unifi.unihosted.com} value=0 ], [ var='B' labels={instance=m1084.unifi.unihosted.com} value=0 ], [ var='C' labels={instance=m1084.unifi.unihosted.com} value=0 ]}]" duration=12.321774ms
+logger=ngalert.state.manager.persist user=700783 slug=gsgmedia t=2024-05-29T13:44:14.035351438Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=14.099459ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-poqtjdlj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035448626Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-poqtjdlj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035423196Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-backend-db-read-replica-1, env=apac" t=2024-05-29T13:44:14.035318795Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=698103 slug=vericast version=54 fingerprint=faece4eca4115641 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.035318408Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.034980414s EvaluationString:}]" duration=788.448832ms
+logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=monitoring, service=prometheus-grafana" t=2024-05-29T13:44:14.035288161Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-poqtjdlj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035311204Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-poqtjdlj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035282044Z level=debug msg="Setting next state" handler=resultNormal
+
logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=monitoring, service=prometheus-grafana" t=2024-05-29T13:44:14.035279507Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ponor8tx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035195943Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.035078472Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ponor8tx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.035145703Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.035099433Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:14.03503115Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.712893ms + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-stock-receiver, dimension_QueueName=ecom-stage-oms-stock-receiver-fulfilment-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-stock-receiver-fulfilment-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.034977689Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pomso1qv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.03491325Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-poh5yxxe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034703688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=391359 slug=linklogistics t=2024-05-29T13:44:14.034634145Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + 
logger=ngalert.state.manager user=391359 slug=linklogistics instance= t=2024-05-29T13:44:14.034609544Z level=warn msg="Failed to take an image" dashboard=ddh3r9avwaqdce panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pogw45ly-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034612077Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.034499502Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.034528061Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.10695ms + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.034529443Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:14.034514232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-v4monitoring-db, env=us" t=2024-05-29T13:44:14.034470359Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pogw45ly-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034530546Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pogw45ly-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034509856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pog77wyg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034481836Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.034352896Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-pog77wyg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034427525Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=linkerd-viz, service=prometheus" t=2024-05-29T13:44:14.034399287Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pog77wyg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034373815Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pog77wyg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034352115Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.034295919Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pod93l2i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034300254Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=linkerd-viz, service=metrics-api" t=2024-05-29T13:44:14.034291451Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=linkerd-viz, service=metrics-api" t=2024-05-29T13:44:14.034282585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pod93l2i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034270524Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pod93l2i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.034204693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-partners-integrator, dimension_QueueName=ecom-stage-oms-partners-integrator-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-partners-integrator-dead-letter-queue, pagerduty_service=order-management-core, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.034016466Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-partners-integrator, dimension_QueueName=ecom-stage-oms-partners-integrator-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-partners-integrator-dead-letter-queue, pagerduty_service=order-management-core, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.034000427Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pocv71f6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.034011681Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-po2njyzb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.03394762Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-po2njyzb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.03390401Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=linkerd, service=linkerd-destination" t=2024-05-29T13:44:14.033827975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=916149 slug=cmfollpd t=2024-05-29T13:44:14.033741249Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.555599ms + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=krakend, service=krakend-deployment" t=2024-05-29T13:44:14.033687532Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane 
instance="environment=development, namespace=krakend, service=krakend-deployment" t=2024-05-29T13:44:14.033676934Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-po1bt6qt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033660408Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.033630978Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-instant-id-qa-db, env=us" t=2024-05-29T13:44:14.03356518Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.033415216Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=keycloak, service=keycloak" t=2024-05-29T13:44:14.033521441Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pnv55e89-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033402445Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.033361631Z caller=remote_image_capturer.go:54 user=391359 slug=linklogistics rule_org_id=1 rule_uid=cdlrdgjytb4e8e dashboard=ddh3r9avwaqdce panel=1 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=84360 slug=sib t=2024-05-29T13:44:14.033341811Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pntfnrg3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033358764Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=ingress-nginx-controller, service=ingress-nginx-defaultbackend" t=2024-05-29T13:44:14.03335431Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pntfnrg3-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033295014Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pntfnrg3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033218443Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pnm9du5p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033158862Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.033107205Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pnm9du5p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033084962Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pnm9du5p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033034031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pnm9du5p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.033020281Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.033038022Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=chaos-mesh, service=chaos-dns-server" t=2024-05-29T13:44:14.032993086Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.032968831Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pnkg78kb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.03293388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pnkg78kb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.0329054Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn9nzsk8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032799189Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=chaos-mesh, service=chaos-dashboard" t=2024-05-29T13:44:14.032800823Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn9nzsk8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032770148Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.032667026Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=475170 slug=paypaplane instance="environment=development, namespace=chaos-mesh, service=chaos-daemon" t=2024-05-29T13:44:14.032651985Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn9nzsk8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032639637Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn802ty7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032435855Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.032312236Z caller=remote_instance_store.go:51 
user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-invoice-mediator, dimension_QueueName=ecom-stage-oms-invoice-mediator-emails-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.032383236Z level=warn msg="Failed to take an image" dashboard=CNR8LzU7z2323213wrrwewr panel=16 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn802ty7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032384764Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-frontend-db, env=us" t=2024-05-29T13:44:14.032231502Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn5g9mkv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032180652Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn5g9mkv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032151772Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn5g9mkv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.032090891Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.031980461Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn38498i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.03199543Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-dow-jones-db, env=us" t=2024-05-29T13:44:14.031979647Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pn38498i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.03195291Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmxx1hw6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031692787Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmxx1hw6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031665447Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.031601043Z caller=remote_alert_sender.go:94 user=679831 slug=joveostageaws host=joveostageaws-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.120.48:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c383bf7c-1b3d-4dee-874f-6e07b1fbd4e0 alerts=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-creditsafe-db, env=us" t=2024-05-29T13:44:14.031595549Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:14.031479142Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.678775ms + logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:14.031398275Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.018179ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmv9murz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031414505Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmv9murz-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031291613Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmv9murz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031281213Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmqfptq4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031226383Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmqfptq4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031179192Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmqfptq4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.031110801Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-compliance-lens-db, env=us" t=2024-05-29T13:44:14.031319706Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.031205401Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-billing-db, env=us" t=2024-05-29T13:44:14.031147004Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.031053106Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmmf54g5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.03096418Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative 
instance="dbinstance_identifier=ohio-prod-backend-db-read-replica-1, env=us" t=2024-05-29T13:44:14.030922974Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmj4m3nt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030848449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmj4m3nt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030747388Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.030805777Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.030669714Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:14.030658084Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:14.030470344Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmj2ab3f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030443415Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-veridas-db, env=qa" t=2024-05-29T13:44:14.030490564Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-invoice-mediator, dimension_QueueName=ecom-stage-oms-invoice-mediator-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.030390561Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-v4monitoring-db, env=qa" t=2024-05-29T13:44:14.030303124Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.030328325Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug 
ts=2024-05-29T13:44:14.030297291Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmibvz8l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030273273Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.030265812Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=837396 slug=hovesbx t=2024-05-29T13:44:14.030154467Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:14.030205256Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-fulfillment-mediator, dimension_QueueName=ecom-stage-oms-fulfillment-mediator-s3-wholesale-events-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-fulfillment-mediator-s3-wholesale-events-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.030166098Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmibvz8l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030231252Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-sar-investigation-db, env=qa" t=2024-05-29T13:44:14.03016838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-sar-investigation-db, env=qa" t=2024-05-29T13:44:14.030158087Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmdiopsr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030172072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.030126686Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmdiopsr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030140251Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pmdiopsr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.030095181Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-fulfillment-mediator, dimension_QueueName=ecom-stage-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.030010444Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.029908479Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.029971555Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-plf4nc6l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029745597Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-periodic-reviews-db, env=qa" t=2024-05-29T13:44:14.029679809Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:14.029603567Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pldn5f94-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029600186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-instant-id-qa-db, env=qa" t=2024-05-29T13:44:14.029505838Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pldn5f94-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029502935Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pldn5f94-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029471974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl91beui-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029416054Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.029283327Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative 
instance="dbinstance_identifier=lon-qa-idverse-enterprise-db, env=qa" t=2024-05-29T13:44:14.029352949Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl91beui-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029338523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl91beui-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029275332Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl91beui-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029250852Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.029134827Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-frontend-log-db, env=qa" t=2024-05-29T13:44:14.029188537Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl6yyq5r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029167691Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl6yyq5r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.029116231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl6yyq5r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02909128Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-frontend-db, env=qa" t=2024-05-29T13:44:14.02902035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl4ugc1b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.028966339Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pl4ugc1b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.028913729Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkzeea2w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.028842998Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=664219 slug=gitpoddedicatedprod t=2024-05-29T13:44:14.028737068Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=664219 slug=gitpoddedicatedprod t=2024-05-29T13:44:14.028690337Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-fulfillment-mediator, dimension_QueueName=ecom-stage-oms-fulfillment-mediator-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.028710139Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pky0s3yo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.028493834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-fraud-check-service, dimension_QueueName=ecom-stage-oms-fraud-check-service-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-fraud-check-service-dead-letter-queue, 
pagerduty_service=oms-fraud, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.028466193Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.028387013Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.028402762Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-data-management-db, env=qa" t=2024-05-29T13:44:14.028424307Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-data-management-db, env=qa" t=2024-05-29T13:44:14.028415478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkwyfmmv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.028334233Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.028263395Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.028229145Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkwyfmmv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.028234822Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.028148382Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:14.028157511Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:14.028090623Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-comply-advantage-db, env=qa" t=2024-05-29T13:44:14.028173707Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.028034051Z caller=remote_instance_store.go:51 user=403369 slug=clearsaletechlabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-compliance-lens-db, env=qa" t=2024-05-29T13:44:14.02801804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-compliance-lens-db, env=qa" t=2024-05-29T13:44:14.028004549Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkrltd6c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.028006379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkrltd6c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027980179Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pko3u9p2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027869118Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=421567 slug=nexx360 t=2024-05-29T13:44:14.027823217Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pko3u9p2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027843608Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.027756814Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.862812ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-billing-db, env=qa" t=2024-05-29T13:44:14.027827414Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=421567 slug=nexx360 t=2024-05-29T13:44:14.027762977Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:14.027826289Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:14.027765727Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError +level=debug ts=2024-05-29T13:44:14.02780686Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-pkkozce8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027738846Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-backend-db, env=qa" t=2024-05-29T13:44:14.027694356Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkkozce8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027672186Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkkozce8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027647336Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkkozce8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027601555Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-veridas-db, env=uk" t=2024-05-29T13:44:14.027572063Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ztisww" t=2024-05-29T13:44:14.027618606Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkft4suc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027538074Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zswya6" t=2024-05-29T13:44:14.027594403Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zrhug4" t=2024-05-29T13:44:14.027528987Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zpr6fs" t=2024-05-29T13:44:14.027494417Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zpi1uc" t=2024-05-29T13:44:14.02748407Z level=debug 
msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkft4suc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027506904Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zpi1uc" t=2024-05-29T13:44:14.027478373Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zp8jvw" t=2024-05-29T13:44:14.027468707Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-v4monitoring-db, env=uk" t=2024-05-29T13:44:14.027388038Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zp0r1n" t=2024-05-29T13:44:14.027452919Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zp0r1n" t=2024-05-29T13:44:14.027449817Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zminsp" t=2024-05-29T13:44:14.027399423Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=646202 slug=kairosaerospace t=2024-05-29T13:44:14.027341264Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.240372ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pkdwtz12-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027372953Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-zc5feb" t=2024-05-29T13:44:14.027330203Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-z9te99" t=2024-05-29T13:44:14.027303732Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-sar-investigation-db, env=uk" t=2024-05-29T13:44:14.027232836Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-sar-investigation-db, env=uk" t=2024-05-29T13:44:14.027223278Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-z4g5pw" t=2024-05-29T13:44:14.027217199Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=oms-customer-mediator, dimension_QueueName=ecom-stage-oms-customer-mediator-dead-letter-queue, environment=staging, 
job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.027177878Z level=debug msg="Setting next state" handler=resultAlerting +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-z1o0rc" t=2024-05-29T13:44:14.027164902Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-z1dfuf" t=2024-05-29T13:44:14.027138795Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yyliwe" t=2024-05-29T13:44:14.027097584Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yyliwe" t=2024-05-29T13:44:14.027089443Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.027024266Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pk9faqx3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02708135Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yqll5h" t=2024-05-29T13:44:14.027062882Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.02695082Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yqll5h" t=2024-05-29T13:44:14.027056072Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.027042568Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pk9faqx3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.027000059Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.027024387Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 
slug=exabeam instance="project_id=ecp-data-yp7xyx" t=2024-05-29T13:44:14.027037376Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ym538y" t=2024-05-29T13:44:14.0270224Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.026984077Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ym0yti" t=2024-05-29T13:44:14.027006573Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:14.026926461Z level=debug msg="Setting next state" handler=resultError +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ym0yti" t=2024-05-29T13:44:14.027000112Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pk95gpu5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.026966658Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pk95gpu5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.026939628Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yjvp9t" t=2024-05-29T13:44:14.026966784Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-periodic-reviews-db, env=uk" t=2024-05-29T13:44:14.026896424Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pk95gpu5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.026866797Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yi1zlz" t=2024-05-29T13:44:14.026928419Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-periodic-reviews-db, env=uk" t=2024-05-29T13:44:14.026879775Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yi1zlz" t=2024-05-29T13:44:14.02691973Z level=debug 
msg="Setting next state" handler=resultNormal +logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=8aa476c8b08194d8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.026848615Z level=error msg="Failed to evaluate rule" error="failed to build query 'C': data source not found" duration=7.029111ms +level=error ts=2024-05-29T13:44:14.026804779Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'C': data source not found" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yh5nxj" t=2024-05-29T13:44:14.026902119Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yh5nxj" t=2024-05-29T13:44:14.026892927Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pk95gpu5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.026740026Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-yaarrj" t=2024-05-29T13:44:14.02672002Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-y8370k" t=2024-05-29T13:44:14.026701825Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-instant-id-qa-db, env=uk" t=2024-05-29T13:44:14.026661156Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-y8370k" t=2024-05-29T13:44:14.026692949Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pk1wsrpi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.026634775Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-y7bbdf" t=2024-05-29T13:44:14.026664679Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.026454384Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-y6dda8" t=2024-05-29T13:44:14.026649185Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-y6dda8" t=2024-05-29T13:44:14.026641383Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xzhkeu" t=2024-05-29T13:44:14.026594827Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xw5n6z" t=2024-05-29T13:44:14.026527225Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-idverse-enterprise-db, env=uk" t=2024-05-29T13:44:14.02643779Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xriuv4" t=2024-05-29T13:44:14.026474699Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xriuv4" t=2024-05-29T13:44:14.026464565Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.026397138Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xnook9" t=2024-05-29T13:44:14.026421185Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjy3snoi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.026405043Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xmltde" t=2024-05-29T13:44:14.026340041Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xmbbuw" t=2024-05-29T13:44:14.026320969Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xiuq9m" t=2024-05-29T13:44:14.026294377Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xhzf7z" t=2024-05-29T13:44:14.026242086Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-xccfkt" t=2024-05-29T13:44:14.026217418Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pju10r1b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02617939Z level=debug 
msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-x63wfv" t=2024-05-29T13:44:14.026185395Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.025985817Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-x0kx0c" t=2024-05-29T13:44:14.026085267Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-x0kx0c" t=2024-05-29T13:44:14.026076237Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjtmygix-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.026015219Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjtmygix-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.025967758Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-wy2imr" t=2024-05-29T13:44:14.026038228Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-wsewxo" t=2024-05-29T13:44:14.025955424Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-wsewxo" t=2024-05-29T13:44:14.025946573Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-wrgrno" t=2024-05-29T13:44:14.025903144Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:14.025816533Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjqyaigs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.025785536Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-wpp0ca" t=2024-05-29T13:44:14.025868416Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-pjqyaigs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.025749776Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-wju67m" t=2024-05-29T13:44:14.025765884Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-wjmtdo" t=2024-05-29T13:44:14.025728241Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-dow-jones-db, env=uk" t=2024-05-29T13:44:14.025611252Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-w7kbtg" t=2024-05-29T13:44:14.025641694Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjhcgwnp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.025496933Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vvkjqz" t=2024-05-29T13:44:14.025577542Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vu2874" t=2024-05-29T13:44:14.02554432Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vu2874" t=2024-05-29T13:44:14.025537286Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=177453 slug=clabs instance="datasource_uid=7UodHjDnz, ref_id=A" t=2024-05-29T13:44:14.025507877Z level=debug msg="Setting next state" handler=resultNoData +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vtn6uk" t=2024-05-29T13:44:14.025519623Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vtejaz" t=2024-05-29T13:44:14.02550926Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vqhi4p" t=2024-05-29T13:44:14.025486407Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-data-management-db, env=uk" t=2024-05-29T13:44:14.025406699Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vq3foz" t=2024-05-29T13:44:14.025462871Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vpx83f" t=2024-05-29T13:44:14.025438163Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-pjdw34rh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.025326702Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vlridw" t=2024-05-29T13:44:14.025394836Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vliyg8" t=2024-05-29T13:44:14.025369737Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjdw34rh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.025282801Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vlb8gt" t=2024-05-29T13:44:14.025357451Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vklp8h" t=2024-05-29T13:44:14.025334237Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vigkyw" t=2024-05-29T13:44:14.025258091Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjdenjnu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.025080689Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-creditsafe-db, env=uk" t=2024-05-29T13:44:14.025206414Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-creditsafe-db, env=uk" t=2024-05-29T13:44:14.025193254Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vgdrcn" t=2024-05-29T13:44:14.025180271Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vemyjp" t=2024-05-29T13:44:14.025154281Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vbrjbt" t=2024-05-29T13:44:14.025138331Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-vaunqn" t=2024-05-29T13:44:14.025112122Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-v6rdbh" t=2024-05-29T13:44:14.025089151Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 
slug=risknarrative instance="dbinstance_identifier=lon-prod-comply-advantage-db, env=uk" t=2024-05-29T13:44:14.025049564Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-v6hw4e" t=2024-05-29T13:44:14.02506071Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjcd4ulx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024947928Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-fluent-event-publisher-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.024955244Z level=debug msg="Setting next state" handler=resultAlerting +logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.024919431Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-uzcm3b" t=2024-05-29T13:44:14.024907164Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-uyu8q9" t=2024-05-29T13:44:14.024872563Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-compliance-lens-db, env=uk" t=2024-05-29T13:44:14.024797807Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=361282 slug=turing t=2024-05-29T13:44:14.024807362Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ux0zq3" t=2024-05-29T13:44:14.02479534Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.024751717Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ux0zq3" t=2024-05-29T13:44:14.024785918Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjagk2l0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024751946Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=361282 slug=turing instance="cluster=prod-cluster-new, endpoint=metrics, job=service-developer-master-backend, method=GET, namespace=service-developer, prometheus=monitoring/prometheus-kube-prometheus-prometheus, prometheus_replica=prometheus-prometheus-kube-prometheus-prometheus-0, route=/api/v1/developer/#val/skills, service=service-developer-master-backend, turing_env=prod" t=2024-05-29T13:44:14.024709216Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=361282 slug=turing t=2024-05-29T13:44:14.0246511Z level=debug msg="State manager processing evaluation results" resultCount=2 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-uw9sqy" t=2024-05-29T13:44:14.024740266Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pjagk2l0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024649545Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.024646643Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-wholesale_fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-wholesale_fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.024624927Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-urwp4v" t=2024-05-29T13:44:14.02463371Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-urwp4v" t=2024-05-29T13:44:14.024624787Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pj5zya8j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024547994Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.024546388Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pj5zya8j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024516323Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-unumzs" t=2024-05-29T13:44:14.02457138Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-un8xur" t=2024-05-29T13:44:14.024518377Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-umqbqo" t=2024-05-29T13:44:14.024492472Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-uk7sjm" t=2024-05-29T13:44:14.024467731Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ujzj2o" t=2024-05-29T13:44:14.024451364Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pj5zya8j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024472993Z level=debug msg="Keeping state" state=Normal +level=info ts=2024-05-29T13:44:14.024373184Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.74.54:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdlnyccs1r4e8e alerts=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pj4slasm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024331311Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pj4slasm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024265171Z level=debug msg="Keeping state" state=Normal +level=info ts=2024-05-29T13:44:14.024311063Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique 
host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.51.155:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdlnyccs1r4e8e alerts=1
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-veridas-db, env=pp" t=2024-05-29T13:44:14.024282341Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-veridas-db, env=pp" t=2024-05-29T13:44:14.024267058Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-tw-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-tw-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.024254359Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-u3q13l" t=2024-05-29T13:44:14.024246491Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.024197607Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pj4slasm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02415395Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tz1wbo" t=2024-05-29T13:44:14.024201027Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.024136652Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.024078907Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-piye3taq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024097449Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-piye3taq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024068619Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-twy0i3" t=2024-05-29T13:44:14.024102327Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-piye3taq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.024023588Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-v4monitoring-db, env=pp" t=2024-05-29T13:44:14.024030014Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-piye3taq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023995288Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tuwumg" t=2024-05-29T13:44:14.024055157Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tuwumg" t=2024-05-29T13:44:14.024047693Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tpxllf" t=2024-05-29T13:44:14.023989029Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tpxllf" t=2024-05-29T13:44:14.023977808Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tphljd" t=2024-05-29T13:44:14.023961783Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-picjuqei-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023836696Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-picjuqei-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023809856Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tle0wo" t=2024-05-29T13:44:14.023889942Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tkz9zr" t=2024-05-29T13:44:14.023878665Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tkz9zr" t=2024-05-29T13:44:14.023872403Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.023817499Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-picjuqei-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023769146Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tikghl" t=2024-05-29T13:44:14.023830372Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.023751285Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pi9vxu6y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023701765Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-sar-investigation-db, env=pp" t=2024-05-29T13:44:14.023749034Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-tajuxx" t=2024-05-29T13:44:14.02373243Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.023701661Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:14.023694466Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-sywlsl" t=2024-05-29T13:44:14.023701169Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-sxaddy" t=2024-05-29T13:44:14.02365321Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:14.023680431Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-stno7m" t=2024-05-29T13:44:14.023622661Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=428e43443a8203b2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.023505957Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.02216011s EvaluationString:}]" duration=173.618534ms
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-stno7m" t=2024-05-29T13:44:14.023616811Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pi9vxu6y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023546263Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ssvjfo" t=2024-05-29T13:44:14.023576418Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-risk-defense-platform-db, env=pp" t=2024-05-29T13:44:14.023493256Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=453308 slug=hyperzodprod t=2024-05-29T13:44:14.023552799Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pi7r3127-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023509873Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.023504034Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.023450768Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-sq4yqz" t=2024-05-29T13:44:14.023492931Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=453308 slug=hyperzodprod instance= t=2024-05-29T13:44:14.02343946Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-soae96" t=2024-05-29T13:44:14.02345636Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.023374503Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-snt88v" t=2024-05-29T13:44:14.023415812Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.023376397Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:14.023316562Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pi428qo6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.023273761Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.023245971Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-sh8s6v" t=2024-05-29T13:44:14.023276427Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-sgnfwo" t=2024-05-29T13:44:14.023254692Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:14.023235491Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=471861 slug=planetstaging version=2 fingerprint=de1aa32b73524b04 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.023185931Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.022946182s EvaluationString:}]" duration=16.646154ms
+level=info ts=2024-05-29T13:44:14.023105121Z caller=remote_alert_sender.go:94 user=395357 slug=sensen host=sensen-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.36.22:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c9ca580f-3ae5-4674-97ed-2990a9483296 alerts=1
+logger=ngalert.state.manager.persist user=231576 slug=om2phoenixpoc t=2024-05-29T13:44:14.022970719Z level=debug msg="Saving alert states done" count=7 max_state_save_concurrency=1 duration=95.579769ms
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-sbjawl" t=2024-05-29T13:44:14.023085949Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-s7t1hr" t=2024-05-29T13:44:14.023049905Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.022992046Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-s6laiw" t=2024-05-29T13:44:14.023019961Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.023035575Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-s2p7dr" t=2024-05-29T13:44:14.023010627Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.022867779Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-rygvm9" t=2024-05-29T13:44:14.022961278Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phtmcape-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.022918747Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ruz9jc" t=2024-05-29T13:44:14.022906711Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phtmcape-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.022819626Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.022775188Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phr9j4bh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.022719805Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:14.022469446Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-r39y3x" t=2024-05-29T13:44:14.022632602Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qxxfji" t=2024-05-29T13:44:14.0225995Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qxs8yn" t=2024-05-29T13:44:14.022578197Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.022492491Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:14.022450945Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:14.022536617Z caller=remote_instance_store.go:51 user=767797 slug=mgmresorts msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=423441 slug=outgoinc version=9 fingerprint=cbe69a64b5a7f434 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.02232351Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc0166516c0} C:{Var:C Labels: Value:0xc0166516c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.021037584s EvaluationString:[ var='B' labels={} value=1 ], [ var='C' labels={} value=0 ]}]" duration=256.932603ms
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qv8b5r" t=2024-05-29T13:44:14.022483668Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qv8b5r" t=2024-05-29T13:44:14.022475103Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qux8uo" t=2024-05-29T13:44:14.022450483Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qurdm2" t=2024-05-29T13:44:14.022433025Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phmwhyke-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.022332651Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qtnvcr" t=2024-05-29T13:44:14.022407255Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phmwhyke-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.022307021Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qpa8ie" t=2024-05-29T13:44:14.022354972Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.022303475Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phmwhyke-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02225553Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phmwhyke-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02222328Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qigvqe" t=2024-05-29T13:44:14.022321042Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.022245963Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.022307947Z caller=remote_instance_store.go:51 user=173374 slug=felmo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qglfbp" t=2024-05-29T13:44:14.022303945Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qglfbp" t=2024-05-29T13:44:14.022294531Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=173374 slug=felmo t=2024-05-29T13:44:14.022265524Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:14.022246829Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=173374 slug=felmo instance="FunctionName=slots-scheduling" t=2024-05-29T13:44:14.022232936Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.022171026Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.022136868Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.022237063Z level=debug msg="Saving alert states" count=20 max_state_save_concurrency=1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qe2qem" t=2024-05-29T13:44:14.022213445Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qe2qem" t=2024-05-29T13:44:14.022204249Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-qamivh" t=2024-05-29T13:44:14.022183878Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=797725 slug=soltegm t=2024-05-29T13:44:14.02207465Z level=debug msg="Skip rule evaluation because it is paused"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phhtzghz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021930457Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-q6tfzs" t=2024-05-29T13:44:14.022121544Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phgvpo7n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021852576Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phgvpo7n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021840886Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phgvpo7n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021801185Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.02205585Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-q5dvxn" t=2024-05-29T13:44:14.022071081Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phgvpo7n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021790215Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.021995125Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pylhd2" t=2024-05-29T13:44:14.022018222Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SchedulerRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021968971Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pvxh05" t=2024-05-29T13:44:14.022006436Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pvxh05" t=2024-05-29T13:44:14.021998839Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueuedEvents, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021921294Z level=debug msg="Setting next state" handler=resultNormal
+level=info ts=2024-05-29T13:44:14.021893567Z caller=grafana.go:247 user=289650 slug=eurostar msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=48 alerts=0
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueueSummaries, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021889161Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-puzfc7" t=2024-05-29T13:44:14.021952417Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:14.021824982Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:14.02181245Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_NodeRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021850934Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-prphy3" t=2024-05-29T13:44:14.021900755Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.021865007Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ppd5st" t=2024-05-29T13:44:14.021847675Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ppd5st" t=2024-05-29T13:44:14.021838607Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021808301Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021794815Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="app=ser-cron-job" t=2024-05-29T13:44:14.021765098Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:14.021776016Z caller=remote_image_capturer.go:33 user=679831 slug=joveostageaws rule_org_id=1 rule_uid=c383bf7c-1b3d-4dee-874f-6e07b1fbd4e0 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+logger=ngalert.state.manager user=679831 slug=joveostageaws t=2024-05-29T13:44:14.021723799Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_FixConnectionRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021751093Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pheqzod8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021658414Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pkzrva" t=2024-05-29T13:44:14.02177231Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pkfqfv" t=2024-05-29T13:44:14.021756018Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=b9f3dc22a101e99a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.021614689Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.020077714s EvaluationString:}]" duration=2.065990692s
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pkfqfv" t=2024-05-29T13:44:14.021746861Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pgjdv6" t=2024-05-29T13:44:14.021702503Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_RegistrationRecord, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021672632Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_RegistrationRecord, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021659964Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.021619579Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pghiue" t=2024-05-29T13:44:14.021668356Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pdfxc7" t=2024-05-29T13:44:14.021617378Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-pd5jrt" t=2024-05-29T13:44:14.021590119Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockRequest, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021536615Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-dk-se-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-dk-se-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.02155537Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockRequest, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021519596Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-paulve" t=2024-05-29T13:44:14.021539629Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.021467073Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_Lock, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.021482877Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phb2ntjl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021462642Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phb2ntjl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021449672Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phb2ntjl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021411201Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-de-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-de-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.021405397Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-oxu1qw" t=2024-05-29T13:44:14.021484884Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-de-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-de-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.021397446Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ouvktc" t=2024-05-29T13:44:14.021458571Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ouvktc" t=2024-05-29T13:44:14.021449888Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-otiezg" t=2024-05-29T13:44:14.021431756Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-otiezg" t=2024-05-29T13:44:14.021422265Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ot0g70" t=2024-05-29T13:44:14.021403316Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-phb2ntjl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021335841Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-oqqd9c" t=2024-05-29T13:44:14.021375758Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.02133509Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-oqqd9c" t=2024-05-29T13:44:14.021365605Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ch-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ch-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.021306747Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-opgcwd" t=2024-05-29T13:44:14.021323049Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ph7n4czb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021179179Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.021285112Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-onfyts" t=2024-05-29T13:44:14.021250642Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.021113858Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ojput6" t=2024-05-29T13:44:14.02123342Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-frontend-database, env=pp" t=2024-05-29T13:44:14.021200088Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=700783 slug=gsgmedia version=4 fingerprint=dd6853da55365fc9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.021040543Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.020492038s EvaluationString:}]" duration=14.083529ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ph7n4czb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.021102498Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:14.021092458Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ph25b3ok-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.020990397Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ofsywy" t=2024-05-29T13:44:14.021124876Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo instance="QueueName=MigratedCreditCycleAccountsGoLive" t=2024-05-29T13:44:14.021046899Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo instance="QueueName=MigratedCreditCycleAccountsGoLive" t=2024-05-29T13:44:14.021029738Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ocwp0v" t=2024-05-29T13:44:14.02106338Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ocnmtf" t=2024-05-29T13:44:14.021012491Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-oanq7f" t=2024-05-29T13:44:14.020979928Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:14.020957317Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-o8j3ru" t=2024-05-29T13:44:14.020963311Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-o8j3ru" t=2024-05-29T13:44:14.020954275Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-nxupzj" t=2024-05-29T13:44:14.020856419Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-nxtx7b" t=2024-05-29T13:44:14.02083808Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=OHIO Query" t=2024-05-29T13:44:14.020792075Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.020775098Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-nsgatk" t=2024-05-29T13:44:14.020741423Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-noyu5o" t=2024-05-29T13:44:14.020723084Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-data-management-db, env=pp" t=2024-05-29T13:44:14.020735562Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:14.020626393Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=ecommerce-order-mediator, dimension_QueueName=ecom-stage-ecommerce-payment-mediator-dead-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.020675866Z level=warn msg="Failed to take an image" dashboard=CNR8LzU7z2323213wrrwewr panel=16 error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgue3pgp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.020674574Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgsp9869-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.020564003Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-nnz48e" t=2024-05-29T13:44:14.020662493Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgsp9869-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.020537533Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.020563979Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-creditsafe-db, env=pp" t=2024-05-29T13:44:14.02054077Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-nnlrlr" t=2024-05-29T13:44:14.020588049Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=217320 slug=workpath t=2024-05-29T13:44:14.020512712Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=67.34619ms
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-nm8z3z" t=2024-05-29T13:44:14.020530168Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgsp9869-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.020443722Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgg389p1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.020392091Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgg389p1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.020368231Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgg389p1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02030082Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgg389p1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.0202604Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-n4leni" t=2024-05-29T13:44:14.020314064Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgg389p1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.02024489Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-n2qnnk" t=2024-05-29T13:44:14.020287839Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-n2fhgb" t=2024-05-29T13:44:14.020240832Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mwzzr6" t=2024-05-29T13:44:14.020224367Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mwzzr6" t=2024-05-29T13:44:14.020216766Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mtw1wv" t=2024-05-29T13:44:14.020182879Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mtrked" t=2024-05-29T13:44:14.020126487Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-compliance-lens-db, env=pp" t=2024-05-29T13:44:14.020094503Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=148654 slug=tinybeans t=2024-05-29T13:44:14.020063057Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=148654 slug=tinybeans t=2024-05-29T13:44:14.019992193Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=916149 slug=cmfollpd version=1 fingerprint=e16fb85e026abee7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.019963498Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=puusea4afolloraoms1001.foll.gcp.hclsw.internal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=puusea4afolloraoms1001.foll.gcp.hclsw.internal Value:0xc05212ef10} B:{Var:B Labels:instance=puusea4afolloraoms1001.foll.gcp.hclsw.internal Value:0xc05212ef20} C:{Var:C Labels:instance=puusea4afolloraoms1001.foll.gcp.hclsw.internal Value:0xc05212eeb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.019529787s EvaluationString:[ var='A' labels={instance=puusea4afolloraoms1001.foll.gcp.hclsw.internal} value=49.23140779266012 ], [ var='B' labels={instance=puusea4afolloraoms1001.foll.gcp.hclsw.internal} value=49.23140779266012 ], [ var='C' labels={instance=puusea4afolloraoms1001.foll.gcp.hclsw.internal} value=0 ]}]" duration=28.101489ms
+logger=ngalert.scheduler user=70430 slug=dapperlabs version=2 fingerprint=55f725c99d55777d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.019914157Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000002, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.019590577s EvaluationString:}]" duration=25.797381ms
+level=debug ts=2024-05-29T13:44:14.019972858Z caller=remote_image_capturer.go:54 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mliu3z" t=2024-05-29T13:44:14.01996495Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mlcd97" t=2024-05-29T13:44:14.019949248Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mlcd97" t=2024-05-29T13:44:14.019940137Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-billing-db, env=pp" t=2024-05-29T13:44:14.019890185Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mjqohl" t=2024-05-29T13:44:14.019913151Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mepno6" t=2024-05-29T13:44:14.019871034Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mepno6" t=2024-05-29T13:44:14.019862234Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mc40mt" t=2024-05-29T13:44:14.019806464Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-mauqo1" t=2024-05-29T13:44:14.019789113Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pgbdockn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.019776005Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pga7bcdj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.019703714Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pga7bcdj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.019627513Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pga7bcdj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.019549472Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-lud8om" t=2024-05-29T13:44:14.019620725Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.019623498Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-lsmybn" t=2024-05-29T13:44:14.019596455Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pg8p3inp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.019430871Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=ecommerce-order-mediator, dimension_QueueName=ecom-stage-ecommerce-order-mediator-wholesale-orders-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-ecommerce-order-mediator-wholesale-orders-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.019566802Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pg8p3inp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.01930828Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-loltnt" t=2024-05-29T13:44:14.019542786Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pg8mdw9z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.019267309Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=698963 slug=lemonade instance="app=marketing, pod=marketing-5d95d455b8-t2f4w" t=2024-05-29T13:44:14.019530296Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=698963 slug=lemonade instance="app=marketing, pod=marketing-5d95d455b8-g9hc9" t=2024-05-29T13:44:14.019490761Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-lnejoc" t=2024-05-29T13:44:14.019484286Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pg7aj4tp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.019078128Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pg5zkgve-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.018934526Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-lk4xd3" t=2024-05-29T13:44:14.019415229Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-lk4xd3" t=2024-05-29T13:44:14.019405705Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=698963 slug=lemonade version=4 fingerprint=725f03e90d9d7b32 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.019251627Z level=debug msg="Alert rule evaluated" results="[{Instance:app=marketing, pod=marketing-5d95d455b8-27895 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=marketing, pod=marketing-5d95d455b8-27895 Value:0xc060114df0} THRESHOLD:{Var:THRESHOLD Labels:app=marketing, pod=marketing-5d95d455b8-27895 Value:0xc060114e30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.018807668s EvaluationString:[ var='QUERY' labels={app=marketing, pod=marketing-5d95d455b8-27895} value=1.2736055555555554 ], [ var='THRESHOLD' labels={app=marketing, pod=marketing-5d95d455b8-27895} value=0 ]} {Instance:app=marketing, pod=marketing-5d95d455b8-g9hc9 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=marketing, pod=marketing-5d95d455b8-g9hc9 Value:0xc060114ec0} THRESHOLD:{Var:THRESHOLD Labels:app=marketing, pod=marketing-5d95d455b8-g9hc9 Value:0xc060114e80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.018825147s EvaluationString:[ var='QUERY'
labels={app=marketing, pod=marketing-5d95d455b8-g9hc9} value=1.25 ], [ var='THRESHOLD' labels={app=marketing, pod=marketing-5d95d455b8-g9hc9} value=0 ]} {Instance:app=marketing, pod=marketing-5d95d455b8-t2f4w State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=marketing, pod=marketing-5d95d455b8-t2f4w Value:0xc060114f30} THRESHOLD:{Var:THRESHOLD Labels:app=marketing, pod=marketing-5d95d455b8-t2f4w Value:0xc060114f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.018831983s EvaluationString:[ var='QUERY' labels={app=marketing, pod=marketing-5d95d455b8-t2f4w} value=0 ], [ var='THRESHOLD' labels={app=marketing, pod=marketing-5d95d455b8-t2f4w} value=0 ]}]" duration=76.63377ms +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-lieih6" t=2024-05-29T13:44:14.019387618Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-lcylqe" t=2024-05-29T13:44:14.01925738Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-l9zedd" t=2024-05-29T13:44:14.019208121Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-kiyvg4" t=2024-05-29T13:44:14.019032794Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-kdiogx" t=2024-05-29T13:44:14.018952839Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-kao2kc" t=2024-05-29T13:44:14.018935403Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-k5mr6i" t=2024-05-29T13:44:14.018867556Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-k1ddwv" t=2024-05-29T13:44:14.018841137Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jyz7om" t=2024-05-29T13:44:14.01879058Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jyz7om" t=2024-05-29T13:44:14.018780785Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jydqcg" t=2024-05-29T13:44:14.018761843Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jydqcg" t=2024-05-29T13:44:14.018751714Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jvovh5" t=2024-05-29T13:44:14.018724194Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-juvcdg" t=2024-05-29T13:44:14.018707007Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jswnit" t=2024-05-29T13:44:14.018654923Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jswnit" t=2024-05-29T13:44:14.018645543Z level=debug msg="Setting next state" 
handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jsoyax" t=2024-05-29T13:44:14.018618585Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jqnyay" t=2024-05-29T13:44:14.018557718Z level=debug msg="Setting next state" handler=resultNormal +level=warn ts=2024-05-29T13:44:14.018482585Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=509852 slug=lillepold +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jmfwck" t=2024-05-29T13:44:14.018490368Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=400599 slug=unionai t=2024-05-29T13:44:14.01844475Z level=debug msg="Saving alert states" count=4 max_state_save_concurrency=1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jiv4wh" t=2024-05-29T13:44:14.018468061Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jigdck" t=2024-05-29T13:44:14.018425655Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jhymat" t=2024-05-29T13:44:14.018409604Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.018367759Z caller=remote_instance_store.go:51 user=635771 slug=sharedservices msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jggf3x" t=2024-05-29T13:44:14.01837326Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfojvnqz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.01830802Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jfbnbl" t=2024-05-29T13:44:14.018347103Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=635771 slug=sharedservices t=2024-05-29T13:44:14.018273348Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfojvnqz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.018232589Z level=debug msg="Setting next state" handler=resultNormal +level=info ts=2024-05-29T13:44:14.018292041Z caller=remote_alert_sender.go:94 user=916144 slug=cmjjilpd host=cmjjilpd-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.109.171:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cdkvnam1jz5z4c alerts=3 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jcb5yt" t=2024-05-29T13:44:14.018261033Z level=debug msg="Keeping state" state=Normal 
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jc2ao2" t=2024-05-29T13:44:14.018235363Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:14.018177082Z caller=client.go:80 msg="creating client for grafana instance" user=492975 addr=dns:///mdgtest-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-jbluir" t=2024-05-29T13:44:14.018190272Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfmmoln7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.018154798Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-j8vyz8" t=2024-05-29T13:44:14.018129718Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=400599 slug=unionai instance="cluster=canary" t=2024-05-29T13:44:14.018074805Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-j3durc" t=2024-05-29T13:44:14.018094056Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-j1cher" t=2024-05-29T13:44:14.018068117Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-izdar7" t=2024-05-29T13:44:14.018023176Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.017897985Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfifyhkq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.017964496Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfifyhkq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.017934106Z level=debug msg="Setting next state" handler=resultNormal
+level=warn ts=2024-05-29T13:44:14.017812878Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=473233 slug=lpskdl
+level=debug ts=2024-05-29T13:44:14.017818878Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.017774778Z caller=ruler.go:522 msg="tenant is owned by this instance" user=473233 slug=lpskdl groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfifyhkq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.017856115Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-iulcj6" t=2024-05-29T13:44:14.017880409Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-is31wf" t=2024-05-29T13:44:14.017835862Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-is31wf" t=2024-05-29T13:44:14.017826707Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-iquvi5" t=2024-05-29T13:44:14.017794376Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfieaubo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.017668183Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-iqcon8" t=2024-05-29T13:44:14.017767391Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-imuye3" t=2024-05-29T13:44:14.017662515Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.017199134Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfi6vsnc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.017484471Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-iegjxa" t=2024-05-29T13:44:14.017484885Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.017409425Z caller=remote_image_capturer.go:54 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-iegjxa" t=2024-05-29T13:44:14.017474554Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-idowgo" t=2024-05-29T13:44:14.017449262Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.017101807Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-iaxzqy" t=2024-05-29T13:44:14.01741147Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.0170236Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=ecommerce-order-mediator, dimension_QueueName=ecom-stage-ecommerce-order-mediator-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.017259989Z level=debug msg="Setting next state" handler=resultAlerting
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-i7mq0j" t=2024-05-29T13:44:14.017306423Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfgkl4ix-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.017243069Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016936329Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.017220707Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016928475Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hxumkh" t=2024-05-29T13:44:14.017250423Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hxumkh" t=2024-05-29T13:44:14.017240943Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.01687568Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pfgkl4ix-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.017207358Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:14.017182515Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.012914ms
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016859755Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hxqtc8" t=2024-05-29T13:44:14.017222931Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.01684732Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016801359Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.01707405Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=ecommerce-order-mediator, dimension_QueueName=ecom-stage-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.017079769Z level=warn msg="Failed to take an image" dashboard=CNR8LzU7z2323213wrrwewr panel=16 error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hv1kgy" t=2024-05-29T13:44:14.017162473Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:14.017046828Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hstphs" t=2024-05-29T13:44:14.017118416Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016748625Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016731195Z level=debug msg="Setting next state" handler=resultError
+level=warn ts=2024-05-29T13:44:14.017006371Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=760916 slug=lomasmhc
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pffsrhvj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.016997816Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hshuid" t=2024-05-29T13:44:14.017082043Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016706516Z level=debug msg="Setting next state" handler=resultError
+level=debug ts=2024-05-29T13:44:14.01696522Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.01698547Z caller=ruler.go:522 msg="tenant is owned by this instance" user=760916 slug=lomasmhc groups=0
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hrkxrg" t=2024-05-29T13:44:14.017029838Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016683933Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016642714Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+level=debug ts=2024-05-29T13:44:14.016896055Z caller=remote_instance_store.go:51 user=763376 slug=f5nginxone msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hhlnhx" t=2024-05-29T13:44:14.016982361Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016623694Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hhlnhx" t=2024-05-29T13:44:14.016975764Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hghi2c" t=2024-05-29T13:44:14.016957563Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pffsrhvj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.016946696Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-hbjars" t=2024-05-29T13:44:14.016940213Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016618422Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.01660937Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-h852tx" t=2024-05-29T13:44:14.016898353Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-h70hpp" t=2024-05-29T13:44:14.016882841Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016589966Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-h5ywiq" t=2024-05-29T13:44:14.016865944Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-h5ywiq" t=2024-05-29T13:44:14.016859406Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016570885Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-h0jrqh" t=2024-05-29T13:44:14.016846374Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016529869Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-h0fp8g" t=2024-05-29T13:44:14.016810809Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pf9d4gdn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.016729003Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gxpz0v" t=2024-05-29T13:44:14.016744423Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gxpz0v" t=2024-05-29T13:44:14.016736258Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gxp7tu" t=2024-05-29T13:44:14.016726433Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gxea4e" t=2024-05-29T13:44:14.016710516Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gvfkii" t=2024-05-29T13:44:14.01664606Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016445212Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gumfnf" t=2024-05-29T13:44:14.01661136Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016429675Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016420623Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gtockw" t=2024-05-29T13:44:14.016575984Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016406769Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gq9fmc" t=2024-05-29T13:44:14.016557928Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=Ta6tIPbnz, ref_id=A" t=2024-05-29T13:44:14.01657302Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gq9fmc" t=2024-05-29T13:44:14.016552105Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pf9d4gdn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.016585552Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016332107Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gpgtv0" t=2024-05-29T13:44:14.016520387Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-goixky" t=2024-05-29T13:44:14.016504233Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pf9d4gdn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.016563462Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gn5zhp" t=2024-05-29T13:44:14.01647216Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-glsphd" t=2024-05-29T13:44:14.016455678Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016285482Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016279436Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016270855Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.01625746Z level=debug msg="Setting next state" handler=resultError
+level=debug ts=2024-05-29T13:44:14.016432446Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gistqw" t=2024-05-29T13:44:14.016380603Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.016451936Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:14.016418554Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.01613939Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gfm19b" t=2024-05-29T13:44:14.01633714Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016127659Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:14.016377297Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gdbg22" t=2024-05-29T13:44:14.016322068Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.016317217Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-data-management-db, env=dev" t=2024-05-29T13:44:14.016346989Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-gag3ek" t=2024-05-29T13:44:14.016282131Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:14.016345606Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-g8cbxi" t=2024-05-29T13:44:14.01625756Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016112064Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-g8a8nb" t=2024-05-29T13:44:14.016235046Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-g7aof2" t=2024-05-29T13:44:14.01619995Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016090497Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-g7aof2" t=2024-05-29T13:44:14.016196211Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-g3rxv0" t=2024-05-29T13:44:14.016187232Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.016036445Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fywasr" t=2024-05-29T13:44:14.016166626Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fxgyrm" t=2024-05-29T13:44:14.016155607Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fxgyrm" t=2024-05-29T13:44:14.016149273Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.016116468Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fowisp" t=2024-05-29T13:44:14.016039746Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=480731 slug=brightmove t=2024-05-29T13:44:14.016065838Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fowisp" t=2024-05-29T13:44:14.016033847Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.016101478Z caller=remote_instance_store.go:51 user=480731 slug=brightmove msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015928506Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=480731 slug=brightmove instance="jobName=ProcessInboundEmailJob" t=2024-05-29T13:44:14.016040779Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fmerd7" t=2024-05-29T13:44:14.016000321Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=480731 slug=brightmove t=2024-05-29T13:44:14.016005523Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015883149Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015876765Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+level=debug ts=2024-05-29T13:44:14.01602867Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015867146Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fi0cfb" t=2024-05-29T13:44:14.015974901Z level=debug msg="Keeping state" state=Normal
+level=warn ts=2024-05-29T13:44:14.015963161Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=634166 slug=madv113
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fhpywf" t=2024-05-29T13:44:14.015954457Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pezpkt2t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015947105Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:14.015933761Z caller=client.go:80 msg="creating client for grafana instance" user=656602 addr=dns:///mbopt-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fhi101" t=2024-05-29T13:44:14.015941014Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fh0h9p" t=2024-05-29T13:44:14.015931381Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.01578055Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fg46j8" t=2024-05-29T13:44:14.015917197Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015754348Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-fg1edn" t=2024-05-29T13:44:14.015904451Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=797798810101, application=ecommerce-order-mediator, dimension_QueueName=ecom-stage-ecommerce-magento-order-mediator-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core" t=2024-05-29T13:44:14.015870284Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015724314Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.015849675Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.01584013Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=740777 slug=nequico instance="datasource_uid=e255e76d-9672-4fb3-b9a1-71f65e6e61b3, ref_id=A,B,D,E" t=2024-05-29T13:44:14.015881961Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015687784Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pes69fyt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015815624Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-f9ha95" t=2024-05-29T13:44:14.015821319Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-f9ha95" t=2024-05-29T13:44:14.015809598Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.015798767Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015674561Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-f0nv70" t=2024-05-29T13:44:14.015793633Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.015801653Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ezeoq9" t=2024-05-29T13:44:14.015782181Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015657257Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.01561399Z level=debug msg="Setting next state" handler=resultError
+level=debug ts=2024-05-29T13:44:14.015730223Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-perhzbmh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015689363Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-perhzbmh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015656822Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-perhzbmh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015639302Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015594995Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015589214Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015577492Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-eknlmx" t=2024-05-29T13:44:14.015638233Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-eknlmx" t=2024-05-29T13:44:14.015632617Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pel0howl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015562442Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ed8uiz" t=2024-05-29T13:44:14.015538812Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=692010 slug=mercariusprod instance= t=2024-05-29T13:44:14.015506158Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.015464526Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+level=debug ts=2024-05-29T13:44:14.015440956Z caller=ruler.go:522 msg="tenant is owned by this instance" user=503438 slug=luisneto groups=0
+logger=ngalert.scheduler user=884866 slug=cnonumerique version=119 fingerprint=abdd34b2a17f0d31 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.015350407Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdhk917z41xj4a, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.015060113s EvaluationString:}]" duration=10.125097ms
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dw95kk" t=2024-05-29T13:44:14.015483105Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dttyyg" t=2024-05-29T13:44:14.015471535Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dtgijd" t=2024-05-29T13:44:14.01544963Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dswtnd" t=2024-05-29T13:44:14.015437568Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dswtnd" t=2024-05-29T13:44:14.015427374Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dqrctq" t=2024-05-29T13:44:14.015410752Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dqrctq" t=2024-05-29T13:44:14.015404535Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dpxjtn" t=2024-05-29T13:44:14.015387388Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pegchzvd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015313149Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dntyok" t=2024-05-29T13:44:14.015327167Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.015234948Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dkre6r" t=2024-05-29T13:44:14.01530211Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:14.015225654Z caller=client.go:80 msg="creating client for grafana instance" user=540475 addr=dns:///mawa-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dk9hs8" t=2024-05-29T13:44:14.015287044Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-djdykg" t=2024-05-29T13:44:14.015248644Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-djdykg" t=2024-05-29T13:44:14.015239663Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pegchzvd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015207848Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dhdael" t=2024-05-29T13:44:14.015223094Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-de7m3t" t=2024-05-29T13:44:14.015161153Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.015172186Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-backend-db, env=dev" t=2024-05-29T13:44:14.015126376Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.015113768Z caller=remote_instance_store.go:51 user=407477 slug=inventa msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe9x8b11-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.015065946Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-dbrnka" t=2024-05-29T13:44:14.015106012Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.015032093Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-d1gyo2" t=2024-05-29T13:44:14.015002976Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-d1dv2r" t=2024-05-29T13:44:14.014976331Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-d1dv2r" t=2024-05-29T13:44:14.014967053Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.014829132Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=316418 slug=workmotion instance="datasource_uid=uu7Nh0Bnk, ref_id=A" t=2024-05-29T13:44:14.014893174Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-cxdacv" t=2024-05-29T13:44:14.014894687Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:14.014865436Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-crjqv3" t=2024-05-29T13:44:14.014855019Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-colcpt" t=2024-05-29T13:44:14.014821045Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-co5gdv" t=2024-05-29T13:44:14.01479394Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.01472242Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.014712957Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-cf13rz" t=2024-05-29T13:44:14.014708209Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-v4monitoring-db, env=eu" t=2024-05-29T13:44:14.014642216Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe7gnjbt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.014535841Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.014515654Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-c3shu2" t=2024-05-29T13:44:14.014484738Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe7gnjbt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.01443652Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-c2pdfd" t=2024-05-29T13:44:14.014459985Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.014381023Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=oms-stock-receiver, dimension_QueueName=ecom-prod-oms-stock-receiver-fulfilment-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-stock-receiver-fulfilment-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core"
t=2024-05-29T13:44:14.014406308Z level=debug msg="Setting next state" handler=resultAlerting +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe7gnjbt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.014347209Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe7gnjbt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.014332329Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bzkshz" t=2024-05-29T13:44:14.014385136Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bxyf8w" t=2024-05-29T13:44:14.014368097Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bxyf8w" t=2024-05-29T13:44:14.014358228Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:14.014147941Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.231655ms +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bxobjq" t=2024-05-29T13:44:14.014341342Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bxeyfn" t=2024-05-29T13:44:14.014306202Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe65nd8w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.014129527Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe65nd8w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.014117267Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bukqzd" t=2024-05-29T13:44:14.014231175Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.01415558Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" 
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe3jx7gx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.014062106Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.014157778Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bo78um" t=2024-05-29T13:44:14.014107586Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.01410399Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-blvccq" t=2024-05-29T13:44:14.014084741Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pe3jx7gx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.014022106Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bkjpdf" t=2024-05-29T13:44:14.014038444Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bg3hhf" t=2024-05-29T13:44:14.014029758Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bg3hhf" t=2024-05-29T13:44:14.014024154Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-periodic-reviews-db, env=eu" t=2024-05-29T13:44:14.013980767Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bc4dxp" t=2024-05-29T13:44:14.01398114Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bc4dxp" t=2024-05-29T13:44:14.013974971Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bajbhk" t=2024-05-29T13:44:14.013957445Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-bais6z" t=2024-05-29T13:44:14.013941483Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.013893981Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.013859034Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" 
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-b8fbqm" t=2024-05-29T13:44:14.013906036Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-b87hz7" t=2024-05-29T13:44:14.013889806Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-b87hz7" t=2024-05-29T13:44:14.013881637Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.013797067Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-azyrgd" t=2024-05-29T13:44:14.01378395Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.013740006Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-ayvq4k" t=2024-05-29T13:44:14.013747066Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-avlj7o" t=2024-05-29T13:44:14.013730886Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-avlj7o" t=2024-05-29T13:44:14.01372051Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.013583594Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-instant-id-qa-db, env=eu" t=2024-05-29T13:44:14.013580253Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-aragbz" t=2024-05-29T13:44:14.013639261Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-aqt1tb" t=2024-05-29T13:44:14.013621174Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-aqt1tb" t=2024-05-29T13:44:14.013611925Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-aoivkn" t=2024-05-29T13:44:14.013586282Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdzgfbi6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.01349491Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdx69yva-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:14.013411599Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-idverse-enterprise-db, env=eu" t=2024-05-29T13:44:14.013363757Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.013312082Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.013336748Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.013330539Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-aeo0nu" t=2024-05-29T13:44:14.01337981Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.013325557Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-a1lmgi" t=2024-05-29T13:44:14.013277589Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9zk2au" t=2024-05-29T13:44:14.013247774Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.013087992Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9zk2au" t=2024-05-29T13:44:14.013238744Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.013160787Z caller=remote_instance_store.go:51 user=320778 slug=omegaai msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9ujmzs" t=2024-05-29T13:44:14.013168856Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9ujmzs" t=2024-05-29T13:44:14.013159816Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.013132836Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.013076167Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdwbvhes-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.013032576Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9ttzlm" t=2024-05-29T13:44:14.013136864Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdwbvhes-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.013006735Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9nxuhp" t=2024-05-29T13:44:14.01309537Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:14.013059337Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdwbvhes-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.012964025Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.012968066Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.012950031Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9dk279" t=2024-05-29T13:44:14.012951741Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-9cmwtc" t=2024-05-29T13:44:14.012934725Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-909ajq" t=2024-05-29T13:44:14.012897279Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdwbvhes-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.012890644Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-frontend-db-read-replica-1, env=eu" t=2024-05-29T13:44:14.012817032Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8oymlj" t=2024-05-29T13:44:14.012846965Z level=debug msg="Setting next state" handler=resultNormal +level=info component=discovery ts=2024-05-29T13:44:14.012805831Z caller=client.go:80 msg="creating client for grafana instance" user=664981 addr=dns:///matthewwall-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8m9qvs" t=2024-05-29T13:44:14.012813927Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:14.012660858Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.510268ms +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8iql02" t=2024-05-29T13:44:14.012769741Z level=debug msg="Keeping 
state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8hwntd" t=2024-05-29T13:44:14.012748168Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdth530y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.012659352Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8hqdyu" t=2024-05-29T13:44:14.012705854Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8fdc9f" t=2024-05-29T13:44:14.012647166Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8e2vaf" t=2024-05-29T13:44:14.012629655Z level=debug msg="Keeping state" state=Normal +level=info ts=2024-05-29T13:44:14.012591088Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-frontend-db, env=eu" t=2024-05-29T13:44:14.012533473Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8cik9e" t=2024-05-29T13:44:14.012592796Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-8aphhy" t=2024-05-29T13:44:14.012546754Z level=debug msg="Keeping state" state=Normal +level=info component=discovery ts=2024-05-29T13:44:14.012422627Z caller=client.go:80 msg="creating client for grafana instance" user=740108 addr=dns:///martlegters-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-80vasv" t=2024-05-29T13:44:14.012467199Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-7zdtph" t=2024-05-29T13:44:14.012441946Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.012365579Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdroomj9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.01244066Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.012384327Z caller=ruler.go:522 msg="tenant is owned by this instance" user=602248 slug=liorasta groups=0 +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdroomj9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.012409979Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-7s8suh" t=2024-05-29T13:44:14.012403955Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=698963 slug=lemonade instance="app=life, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=life, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production" t=2024-05-29T13:44:14.012387386Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.012330847Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="region=us-east-1, service=kube-state-metrics, stage=production" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdqt1gng-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.012252328Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-7rvgef" t=2024-05-29T13:44:14.012376403Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:14.012305086Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdbzs905-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.012197647Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-7og3tl" t=2024-05-29T13:44:14.012298176Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-7juau6" t=2024-05-29T13:44:14.012279485Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:14.012114663Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.95264ms +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-7jfdj0" t=2024-05-29T13:44:14.012231112Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam 
instance="project_id=ecp-data-7jebig" t=2024-05-29T13:44:14.012214274Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-7jebig" t=2024-05-29T13:44:14.012205576Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pdbzs905-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.012067526Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-79amhi" t=2024-05-29T13:44:14.012151033Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-71lulu" t=2024-05-29T13:44:14.012125908Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-6ysngz" t=2024-05-29T13:44:14.0121154Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-6ysngz" t=2024-05-29T13:44:14.012108053Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-data-management-db, env=eu" t=2024-05-29T13:44:14.012090491Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-6vjf9r" t=2024-05-29T13:44:14.012080341Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-6u86f0" t=2024-05-29T13:44:14.012058622Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-6qzqh7" t=2024-05-29T13:44:14.0120479Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager.persist user=178032 slug=gtplan t=2024-05-29T13:44:14.01196139Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=178032 slug=gtplan t=2024-05-29T13:44:14.011903754Z level=debug msg="State manager processing evaluation results" resultCount=1 +logger=ngalert.scheduler user=178032 slug=gtplan version=25 fingerprint=016b027c8307cb4a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.011792037Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.011419048s EvaluationString:}]" duration=84.933919ms +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-6e4t87" t=2024-05-29T13:44:14.011967832Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-creditsafe-db, env=eu" t=2024-05-29T13:44:14.011849573Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.011905172Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 
slug=exabeam instance="project_id=ecp-data-66l8ek" t=2024-05-29T13:44:14.011926732Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pd3472m8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.011895354Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-61swus" t=2024-05-29T13:44:14.011894676Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5z9mqk" t=2024-05-29T13:44:14.011873674Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5yyk5r" t=2024-05-29T13:44:14.011840007Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.011763223Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5xykde" t=2024-05-29T13:44:14.011809763Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pd2x7xqe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.011752152Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5xevow" t=2024-05-29T13:44:14.011774285Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pd2x7xqe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.011723662Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.011695189Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=oms-invoice-mediator, dimension_QueueName=ecom-prod-oms-invoice-mediator-emails-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.011728533Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z +logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, 
application=oms-invoice-mediator, dimension_QueueName=ecom-prod-oms-invoice-mediator-emails-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.011708701Z level=debug msg="Setting next state" handler=resultAlerting +logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=152ffe2b2f75d70b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.011630618Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.011261703s EvaluationString:}]" duration=53.266801ms +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pd2x7xqe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.011681522Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pd2fkgx8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.011574701Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-comply-advantage-db, env=eu" t=2024-05-29T13:44:14.01161264Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5quizx" t=2024-05-29T13:44:14.011596154Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5oxepi" t=2024-05-29T13:44:14.011575771Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pd2fkgx8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.01151308Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5onoz1" t=2024-05-29T13:44:14.011549184Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5mprvs" t=2024-05-29T13:44:14.011512349Z level=debug msg="Setting next state" handler=resultNormal +level=info ts=2024-05-29T13:44:14.011458488Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" 
error="rpc error: code = Code(422) desc = screenshots unavailable" +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-compliance-lens-db, env=eu" t=2024-05-29T13:44:14.011373019Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-5fpktp" t=2024-05-29T13:44:14.011457052Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4zrdx4" t=2024-05-29T13:44:14.011337605Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.01130762Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4ymvc0" t=2024-05-29T13:44:14.011317328Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.011241716Z caller=ruler.go:522 msg="tenant is owned by this instance" user=666056 slug=kreicer groups=1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4ymvc0" t=2024-05-29T13:44:14.011306383Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4tvvux" t=2024-05-29T13:44:14.011287051Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.011195627Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4tvvux" t=2024-05-29T13:44:14.011276019Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4okvmm" t=2024-05-29T13:44:14.011211644Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcwb6kgq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.011126166Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.01112543Z caller=remote_instance_store.go:51 user=444728 slug=stgnextgen msg="calling SaveAlertInstance" +logger=ngalert.state.manager.persist user=444728 slug=stgnextgen t=2024-05-29T13:44:14.011091078Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4nya0b" t=2024-05-29T13:44:14.011052183Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcwb6kgq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.011016145Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcwb6kgq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010980675Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcuhpfm9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010927294Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcuhpfm9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010893844Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4j0xh2" t=2024-05-29T13:44:14.010973839Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.01091615Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.01085495Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-4bkrtj" t=2024-05-29T13:44:14.010892871Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.010709667Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcuhpfm9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010838953Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.010719962Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcuhpfm9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010728282Z level=debug msg="Keeping state" state=Normal +level=debug ts=2024-05-29T13:44:14.010658171Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" 
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3w9sg7" t=2024-05-29T13:44:14.010761156Z level=debug msg="Setting next state" handler=resultNormal +level=info component=discovery ts=2024-05-29T13:44:14.010659711Z caller=client.go:80 msg="creating client for grafana instance" user=671262 addr=dns:///marineris-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3r7jn2" t=2024-05-29T13:44:14.010699932Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.010676987Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +level=debug ts=2024-05-29T13:44:14.010642397Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" +level=warn ts=2024-05-29T13:44:14.01063481Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=784784 slug=liantisitnonprod +logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-backend-db, env=eu" t=2024-05-29T13:44:14.010628655Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcg7bkpd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010608711Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3jzxds" t=2024-05-29T13:44:14.010645364Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3j2aa1" t=2024-05-29T13:44:14.010622849Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3fwyty" t=2024-05-29T13:44:14.010581353Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3fwyty" t=2024-05-29T13:44:14.010575233Z level=debug msg="Setting next state" handler=resultNormal +level=debug ts=2024-05-29T13:44:14.010522233Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3aqwev" t=2024-05-29T13:44:14.01049301Z level=debug msg="Setting next state" handler=resultNormal +level=info component=discovery ts=2024-05-29T13:44:14.010491209Z caller=client.go:80 msg="creating client for grafana instance" user=414860 addr=dns:///marialehmann-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3amvzg" t=2024-05-29T13:44:14.010477387Z level=debug msg="Keeping state" state=Normal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-3amvzg" t=2024-05-29T13:44:14.010468261Z level=debug msg="Setting next state" handler=resultNormal +logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-37s4xq" t=2024-05-29T13:44:14.010442425Z 
level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-36u5wl" t=2024-05-29T13:44:14.010427446Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcfq1t1v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010384668Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcagwgdr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010289517Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-30umnk" t=2024-05-29T13:44:14.010307096Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2z1zkw" t=2024-05-29T13:44:14.010290029Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2z1zkw" t=2024-05-29T13:44:14.01028074Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2ybcet" t=2024-05-29T13:44:14.010237586Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcagwgdr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010225727Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pcagwgdr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010188796Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2xdiue" t=2024-05-29T13:44:14.010211999Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2rcmkr" t=2024-05-29T13:44:14.010119195Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=646202 slug=kairosaerospace t=2024-05-29T13:44:14.010093942Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=info component=discovery ts=2024-05-29T13:44:14.010011105Z caller=client.go:80 msg="creating client for grafana instance" user=543033 addr=dns:///maneshipocrates-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pc907re2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.010025775Z level=debug msg="Setting next state" handler=resultNormal
+level=warn ts=2024-05-29T13:44:14.010007105Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=606335 slug=krowiorsch
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2mlr8m" t=2024-05-29T13:44:14.01008678Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.009948404Z caller=ruler.go:522 msg="tenant is owned by this instance" user=509118 slug=larsmollereriksen groups=1
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2jgalp" t=2024-05-29T13:44:14.010034781Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-2hcusy" t=2024-05-29T13:44:14.010016435Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pc907re2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.009987124Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-27caqx" t=2024-05-29T13:44:14.009817707Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=201644 slug=thoughtspot instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.009836733Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-1pxyfr" t=2024-05-29T13:44:14.009682489Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-1hp204" t=2024-05-29T13:44:14.009629539Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.009716402Z caller=ruler.go:522 msg="tenant is owned by this instance" user=536272 slug=kovalikadam00 groups=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pc7rkw3m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.009733832Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pc7rkw3m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.009696141Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.009746544Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-16xgmv" t=2024-05-29T13:44:14.009494427Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-137gz7" t=2024-05-29T13:44:14.009452612Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-137gz7" t=2024-05-29T13:44:14.009443441Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-0pz16s" t=2024-05-29T13:44:14.009398393Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pc7rkw3m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.009647341Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.009605582Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pc7rkw3m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00961752Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-0n7z67" t=2024-05-29T13:44:14.00935519Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbyseail-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00953785Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-0hvtlh" t=2024-05-29T13:44:14.009291219Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-0gvonp" t=2024-05-29T13:44:14.009240153Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.009472999Z caller=ruler.go:522 msg="tenant is owned by this instance" user=528965 slug=letsatsi groups=0
+logger=ngalert.state.manager user=327842 slug=exabeam instance="project_id=ecp-data-09a3vw" t=2024-05-29T13:44:14.009141694Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbyseail-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.009389498Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbyseail-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.009357518Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=327842 slug=exabeam version=104 fingerprint=160def4b9b79b953 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.002634724Z level=debug msg="Alert rule evaluated" results="[{Instance:project_id=ecp-data-078rtj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-078rtj Value:0xc01de0fda0} B:{Var:B Labels:project_id=ecp-data-078rtj Value:0xc01de0fda8} C:{Var:C Labels:project_id=ecp-data-078rtj Value:0xc01de0fdf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980222611s EvaluationString:[ var='A' labels={project_id=ecp-data-078rtj} value=0 ], [ var='B' labels={project_id=ecp-data-078rtj} value=0 ], [ var='C' labels={project_id=ecp-data-078rtj} value=0 ]} {Instance:project_id=ecp-data-09a3vw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-09a3vw Value:0xc01de0fe40} B:{Var:B Labels:project_id=ecp-data-09a3vw Value:0xc01de0fe48} C:{Var:C Labels:project_id=ecp-data-09a3vw Value:0xc01de0fe90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980247237s EvaluationString:[ var='A' labels={project_id=ecp-data-09a3vw} value=0 ], [ var='B' labels={project_id=ecp-data-09a3vw} value=0 ], [ var='C' labels={project_id=ecp-data-09a3vw} value=0 ]} {Instance:project_id=ecp-data-0ehehn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0ehehn Value:0xc01de0ff30} B:{Var:B Labels:project_id=ecp-data-0ehehn Value:0xc01de0fee0} C:{Var:C Labels:project_id=ecp-data-0ehehn Value:0xc01de0fee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980258218s EvaluationString:[ var='A' labels={project_id=ecp-data-0ehehn} value=0 ], [ var='B' labels={project_id=ecp-data-0ehehn} value=0 ], [ var='C' labels={project_id=ecp-data-0ehehn} value=0 ]} {Instance:project_id=ecp-data-0fnpzj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0fnpzj Value:0xc01de0ffd0} B:{Var:B Labels:project_id=ecp-data-0fnpzj Value:0xc01de0ff80} C:{Var:C Labels:project_id=ecp-data-0fnpzj Value:0xc01de0ff88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980269968s EvaluationString:[ var='A' labels={project_id=ecp-data-0fnpzj} value=4 ], [ var='B' labels={project_id=ecp-data-0fnpzj} value=4 ], [ var='C'
labels={project_id=ecp-data-0fnpzj} value=0 ]} {Instance:project_id=ecp-data-0gvonp State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0gvonp Value:0xc01a1fc088} B:{Var:B Labels:project_id=ecp-data-0gvonp Value:0xc01a1fc0d0} C:{Var:C Labels:project_id=ecp-data-0gvonp Value:0xc01a1fc080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980278713s EvaluationString:[ var='A' labels={project_id=ecp-data-0gvonp} value=1 ], [ var='B' labels={project_id=ecp-data-0gvonp} value=1 ], [ var='C' labels={project_id=ecp-data-0gvonp} value=0 ]} {Instance:project_id=ecp-data-0hbojg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0hbojg Value:0xc01a1fc120} B:{Var:B Labels:project_id=ecp-data-0hbojg Value:0xc01a1fc128} C:{Var:C Labels:project_id=ecp-data-0hbojg Value:0xc01a1fc1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980289698s EvaluationString:[ var='A' labels={project_id=ecp-data-0hbojg} value=4 ], [ var='B' labels={project_id=ecp-data-0hbojg} value=4 ], [ var='C' labels={project_id=ecp-data-0hbojg} value=0 ]} {Instance:project_id=ecp-data-0hvtlh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0hvtlh Value:0xc01a1fc230} B:{Var:B Labels:project_id=ecp-data-0hvtlh Value:0xc01a1fc238} C:{Var:C Labels:project_id=ecp-data-0hvtlh Value:0xc01a1fc280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98029724s EvaluationString:[ var='A' labels={project_id=ecp-data-0hvtlh} value=0 ], [ var='B' labels={project_id=ecp-data-0hvtlh} value=0 ], [ var='C' labels={project_id=ecp-data-0hvtlh} value=0 ]} {Instance:project_id=ecp-data-0jom5n State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0jom5n Value:0xc01a1fc2d0} B:{Var:B Labels:project_id=ecp-data-0jom5n Value:0xc01a1fc2d8} C:{Var:C Labels:project_id=ecp-data-0jom5n Value:0xc01a1fc320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98030427s EvaluationString:[ var='A' labels={project_id=ecp-data-0jom5n} value=0 ], [ var='B' labels={project_id=ecp-data-0jom5n} value=0 ], [ var='C' labels={project_id=ecp-data-0jom5n} value=0 ]} {Instance:project_id=ecp-data-0n7z67 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0n7z67 Value:0xc01a1fc370} B:{Var:B Labels:project_id=ecp-data-0n7z67 Value:0xc01a1fc378} C:{Var:C Labels:project_id=ecp-data-0n7z67 Value:0xc01a1fc3c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98031174s EvaluationString:[ var='A' labels={project_id=ecp-data-0n7z67} value=0 ], [ var='B' labels={project_id=ecp-data-0n7z67} value=0 ], [ var='C' labels={project_id=ecp-data-0n7z67} value=0 ]} {Instance:project_id=ecp-data-0pw145 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0pw145 Value:0xc01a1fc410} B:{Var:B Labels:project_id=ecp-data-0pw145 Value:0xc01a1fc418} C:{Var:C Labels:project_id=ecp-data-0pw145 Value:0xc01a1fc4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980318695s EvaluationString:[ var='A' labels={project_id=ecp-data-0pw145} value=0 ], [ var='B' labels={project_id=ecp-data-0pw145} value=0 ], [ var='C' labels={project_id=ecp-data-0pw145} value=0 ]} {Instance:project_id=ecp-data-0pz16s State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0pz16s Value:0xc01a1fc680} B:{Var:B Labels:project_id=ecp-data-0pz16s Value:0xc01a1fc688} C:{Var:C Labels:project_id=ecp-data-0pz16s Value:0xc01a1fc6d0}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:3.980326977s EvaluationString:[ var='A' labels={project_id=ecp-data-0pz16s} value=0 ], [ var='B' labels={project_id=ecp-data-0pz16s} value=0 ], [ var='C' labels={project_id=ecp-data-0pz16s} value=0 ]} {Instance:project_id=ecp-data-0srlei State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-0srlei Value:0xc01a1fc770} B:{Var:B Labels:project_id=ecp-data-0srlei Value:0xc01a1fc720} C:{Var:C Labels:project_id=ecp-data-0srlei Value:0xc01a1fc728}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980333944s EvaluationString:[ var='A' labels={project_id=ecp-data-0srlei} value=0 ], [ var='B' labels={project_id=ecp-data-0srlei} value=0 ], [ var='C' labels={project_id=ecp-data-0srlei} value=0 ]} {Instance:project_id=ecp-data-137gz7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-137gz7 Value:0xc01a1fc7c0} B:{Var:B Labels:project_id=ecp-data-137gz7 Value:0xc01a1fc7c8} C:{Var:C Labels:project_id=ecp-data-137gz7 Value:0xc01a1fc810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980341998s EvaluationString:[ var='A' labels={project_id=ecp-data-137gz7} value=0 ], [ var='B' labels={project_id=ecp-data-137gz7} value=0 ], [ var='C' labels={project_id=ecp-data-137gz7} value=0 ]} {Instance:project_id=ecp-data-14a7pl State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-14a7pl Value:0xc01a1fc930} B:{Var:B Labels:project_id=ecp-data-14a7pl Value:0xc01a1fc8e0} C:{Var:C Labels:project_id=ecp-data-14a7pl Value:0xc01a1fc8e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980349706s EvaluationString:[ var='A' labels={project_id=ecp-data-14a7pl} value=2 ], [ var='B' labels={project_id=ecp-data-14a7pl} value=2 ], [ var='C' labels={project_id=ecp-data-14a7pl} value=0 ]} {Instance:project_id=ecp-data-16xgmv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-16xgmv Value:0xc01a1fc988} B:{Var:B Labels:project_id=ecp-data-16xgmv Value:0xc01a1fca50} C:{Var:C Labels:project_id=ecp-data-16xgmv Value:0xc01a1fc980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98035726s EvaluationString:[ var='A' labels={project_id=ecp-data-16xgmv} value=8 ], [ var='B' labels={project_id=ecp-data-16xgmv} value=8 ], [ var='C' labels={project_id=ecp-data-16xgmv} value=0 ]} {Instance:project_id=ecp-data-1bgrtj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1bgrtj Value:0xc01a1fcaa0} B:{Var:B Labels:project_id=ecp-data-1bgrtj Value:0xc01a1fcaa8} C:{Var:C Labels:project_id=ecp-data-1bgrtj Value:0xc01a1fcaf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980363666s EvaluationString:[ var='A' labels={project_id=ecp-data-1bgrtj} value=4 ], [ var='B' labels={project_id=ecp-data-1bgrtj} value=4 ], [ var='C' labels={project_id=ecp-data-1bgrtj} value=0 ]} {Instance:project_id=ecp-data-1fg3dt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1fg3dt Value:0xc01a1fcb90} B:{Var:B Labels:project_id=ecp-data-1fg3dt Value:0xc01a1fcb40} C:{Var:C Labels:project_id=ecp-data-1fg3dt Value:0xc01a1fcb48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980371634s EvaluationString:[ var='A' labels={project_id=ecp-data-1fg3dt} value=0 ], [ var='B' labels={project_id=ecp-data-1fg3dt} value=0 ], [ var='C' labels={project_id=ecp-data-1fg3dt} value=0 ]} {Instance:project_id=ecp-data-1fkvi4 State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:project_id=ecp-data-1fkvi4 Value:0xc01a1fcbe0} B:{Var:B Labels:project_id=ecp-data-1fkvi4 Value:0xc01a1fcbe8} C:{Var:C Labels:project_id=ecp-data-1fkvi4 Value:0xc01a1fcc30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980378762s EvaluationString:[ var='A' labels={project_id=ecp-data-1fkvi4} value=0 ], [ var='B' labels={project_id=ecp-data-1fkvi4} value=0 ], [ var='C' labels={project_id=ecp-data-1fkvi4} value=0 ]} {Instance:project_id=ecp-data-1hb7os State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1hb7os Value:0xc01a1fcc80} B:{Var:B Labels:project_id=ecp-data-1hb7os Value:0xc01a1fcc88} C:{Var:C Labels:project_id=ecp-data-1hb7os Value:0xc01a1fcd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980386613s EvaluationString:[ var='A' labels={project_id=ecp-data-1hb7os} value=0 ], [ var='B' labels={project_id=ecp-data-1hb7os} value=0 ], [ var='C' labels={project_id=ecp-data-1hb7os} value=0 ]} {Instance:project_id=ecp-data-1hp204 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1hp204 Value:0xc01a1fcde0} B:{Var:B Labels:project_id=ecp-data-1hp204 Value:0xc01a1fcd90} C:{Var:C Labels:project_id=ecp-data-1hp204 Value:0xc01a1fcd98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980394294s EvaluationString:[ var='A' labels={project_id=ecp-data-1hp204} value=0 ], [ var='B' labels={project_id=ecp-data-1hp204} value=0 ], [ var='C' labels={project_id=ecp-data-1hp204} value=0 ]} {Instance:project_id=ecp-data-1k1r3k State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1k1r3k Value:0xc01a1fcf28} B:{Var:B Labels:project_id=ecp-data-1k1r3k Value:0xc01a1fcf70} C:{Var:C Labels:project_id=ecp-data-1k1r3k Value:0xc01a1fcf20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980401587s EvaluationString:[ var='A' labels={project_id=ecp-data-1k1r3k} value=1 ], [ var='B' labels={project_id=ecp-data-1k1r3k} value=1 ], [ var='C' labels={project_id=ecp-data-1k1r3k} value=0 ]} {Instance:project_id=ecp-data-1pxyfr State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1pxyfr Value:0xc01a1fcfc8} B:{Var:B Labels:project_id=ecp-data-1pxyfr Value:0xc01a1fd020} C:{Var:C Labels:project_id=ecp-data-1pxyfr Value:0xc01a1fcfc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980407714s EvaluationString:[ var='A' labels={project_id=ecp-data-1pxyfr} value=15 ], [ var='B' labels={project_id=ecp-data-1pxyfr} value=15 ], [ var='C' labels={project_id=ecp-data-1pxyfr} value=0 ]} {Instance:project_id=ecp-data-1t804a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1t804a Value:0xc01a1fd080} B:{Var:B Labels:project_id=ecp-data-1t804a Value:0xc01a1fd088} C:{Var:C Labels:project_id=ecp-data-1t804a Value:0xc01a1fd0d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980415989s EvaluationString:[ var='A' labels={project_id=ecp-data-1t804a} value=0 ], [ var='B' labels={project_id=ecp-data-1t804a} value=0 ], [ var='C' labels={project_id=ecp-data-1t804a} value=0 ]} {Instance:project_id=ecp-data-1ul831 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1ul831 Value:0xc01a1fd190} B:{Var:B Labels:project_id=ecp-data-1ul831 Value:0xc01a1fd198} C:{Var:C Labels:project_id=ecp-data-1ul831 Value:0xc01a1fd1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980423582s EvaluationString:[ var='A' labels={project_id=ecp-data-1ul831} value=0 ], [ var='B' 
labels={project_id=ecp-data-1ul831} value=0 ], [ var='C' labels={project_id=ecp-data-1ul831} value=0 ]} {Instance:project_id=ecp-data-1wlx4v State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-1wlx4v Value:0xc01a1fd230} B:{Var:B Labels:project_id=ecp-data-1wlx4v Value:0xc01a1fd238} C:{Var:C Labels:project_id=ecp-data-1wlx4v Value:0xc01a1fd2b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980431863s EvaluationString:[ var='A' labels={project_id=ecp-data-1wlx4v} value=1 ], [ var='B' labels={project_id=ecp-data-1wlx4v} value=1 ], [ var='C' labels={project_id=ecp-data-1wlx4v} value=0 ]} {Instance:project_id=ecp-data-270lmn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-270lmn Value:0xc01a1fd3a0} B:{Var:B Labels:project_id=ecp-data-270lmn Value:0xc01a1fd3a8} C:{Var:C Labels:project_id=ecp-data-270lmn Value:0xc01a1fd3f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980438679s EvaluationString:[ var='A' labels={project_id=ecp-data-270lmn} value=0 ], [ var='B' labels={project_id=ecp-data-270lmn} value=0 ], [ var='C' labels={project_id=ecp-data-270lmn} value=0 ]} {Instance:project_id=ecp-data-27caqx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-27caqx Value:0xc01a1fd448} B:{Var:B Labels:project_id=ecp-data-27caqx Value:0xc01a1fd490} C:{Var:C Labels:project_id=ecp-data-27caqx Value:0xc01a1fd440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98044516s EvaluationString:[ var='A' labels={project_id=ecp-data-27caqx} value=0 ], [ var='B' labels={project_id=ecp-data-27caqx} value=0 ], [ var='C' labels={project_id=ecp-data-27caqx} value=0 ]} {Instance:project_id=ecp-data-28sgao State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-28sgao Value:0xc01a1fd4e0} B:{Var:B Labels:project_id=ecp-data-28sgao Value:0xc01a1fd4e8} C:{Var:C Labels:project_id=ecp-data-28sgao Value:0xc01a1fd530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98045162s EvaluationString:[ var='A' labels={project_id=ecp-data-28sgao} value=5 ], [ var='B' labels={project_id=ecp-data-28sgao} value=5 ], [ var='C' labels={project_id=ecp-data-28sgao} value=0 ]} {Instance:project_id=ecp-data-297xlz State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-297xlz Value:0xc01a1fd5f0} B:{Var:B Labels:project_id=ecp-data-297xlz Value:0xc01a1fd5f8} C:{Var:C Labels:project_id=ecp-data-297xlz Value:0xc01a1fd640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980464767s EvaluationString:[ var='A' labels={project_id=ecp-data-297xlz} value=0 ], [ var='B' labels={project_id=ecp-data-297xlz} value=0 ], [ var='C' labels={project_id=ecp-data-297xlz} value=0 ]} {Instance:project_id=ecp-data-2c9gas State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2c9gas Value:0xc01a1fd6b0} B:{Var:B Labels:project_id=ecp-data-2c9gas Value:0xc01a1fd6b8} C:{Var:C Labels:project_id=ecp-data-2c9gas Value:0xc01a1fd750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980471004s EvaluationString:[ var='A' labels={project_id=ecp-data-2c9gas} value=1 ], [ var='B' labels={project_id=ecp-data-2c9gas} value=1 ], [ var='C' labels={project_id=ecp-data-2c9gas} value=0 ]} {Instance:project_id=ecp-data-2dawd1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2dawd1 Value:0xc01a1fd7a0} B:{Var:B Labels:project_id=ecp-data-2dawd1 Value:0xc01a1fd7a8} C:{Var:C 
Labels:project_id=ecp-data-2dawd1 Value:0xc01a1fd7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980479297s EvaluationString:[ var='A' labels={project_id=ecp-data-2dawd1} value=0 ], [ var='B' labels={project_id=ecp-data-2dawd1} value=0 ], [ var='C' labels={project_id=ecp-data-2dawd1} value=0 ]} {Instance:project_id=ecp-data-2hcusy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2hcusy Value:0xc01a1fd840} B:{Var:B Labels:project_id=ecp-data-2hcusy Value:0xc01a1fd848} C:{Var:C Labels:project_id=ecp-data-2hcusy Value:0xc01a1fd890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980485747s EvaluationString:[ var='A' labels={project_id=ecp-data-2hcusy} value=0 ], [ var='B' labels={project_id=ecp-data-2hcusy} value=0 ], [ var='C' labels={project_id=ecp-data-2hcusy} value=0 ]} {Instance:project_id=ecp-data-2jgalp State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2jgalp Value:0xc01a1fd930} B:{Var:B Labels:project_id=ecp-data-2jgalp Value:0xc01a1fd8e0} C:{Var:C Labels:project_id=ecp-data-2jgalp Value:0xc01a1fd8e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98049314s EvaluationString:[ var='A' labels={project_id=ecp-data-2jgalp} value=0 ], [ var='B' labels={project_id=ecp-data-2jgalp} value=0 ], [ var='C' labels={project_id=ecp-data-2jgalp} value=0 ]} {Instance:project_id=ecp-data-2kwbaw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2kwbaw Value:0xc01a1fd9f0} B:{Var:B Labels:project_id=ecp-data-2kwbaw Value:0xc01a1fd9f8} C:{Var:C Labels:project_id=ecp-data-2kwbaw Value:0xc01a1fda80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980500177s EvaluationString:[ var='A' labels={project_id=ecp-data-2kwbaw} value=6 ], [ var='B' labels={project_id=ecp-data-2kwbaw} value=6 ], [ var='C' labels={project_id=ecp-data-2kwbaw} value=0 ]} {Instance:project_id=ecp-data-2mlr8m State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2mlr8m Value:0xc01a1fdb90} B:{Var:B Labels:project_id=ecp-data-2mlr8m Value:0xc01a1fdb98} C:{Var:C Labels:project_id=ecp-data-2mlr8m Value:0xc01a1fdc70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980522565s EvaluationString:[ var='A' labels={project_id=ecp-data-2mlr8m} value=3 ], [ var='B' labels={project_id=ecp-data-2mlr8m} value=3 ], [ var='C' labels={project_id=ecp-data-2mlr8m} value=0 ]} {Instance:project_id=ecp-data-2rcmkr State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2rcmkr Value:0xc01a1fdcc0} B:{Var:B Labels:project_id=ecp-data-2rcmkr Value:0xc01a1fdcc8} C:{Var:C Labels:project_id=ecp-data-2rcmkr Value:0xc01a1fdd10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980530783s EvaluationString:[ var='A' labels={project_id=ecp-data-2rcmkr} value=0 ], [ var='B' labels={project_id=ecp-data-2rcmkr} value=0 ], [ var='C' labels={project_id=ecp-data-2rcmkr} value=0 ]} {Instance:project_id=ecp-data-2rufzv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2rufzv Value:0xc01a1fde20} B:{Var:B Labels:project_id=ecp-data-2rufzv Value:0xc01a1fdd60} C:{Var:C Labels:project_id=ecp-data-2rufzv Value:0xc01a1fdd68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980537565s EvaluationString:[ var='A' labels={project_id=ecp-data-2rufzv} value=3 ], [ var='B' labels={project_id=ecp-data-2rufzv} value=3 ], [ var='C' labels={project_id=ecp-data-2rufzv} value=0 ]} 
{Instance:project_id=ecp-data-2rusqe State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2rusqe Value:0xc01a1fde70} B:{Var:B Labels:project_id=ecp-data-2rusqe Value:0xc01a1fde78} C:{Var:C Labels:project_id=ecp-data-2rusqe Value:0xc01a1fdec0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980545997s EvaluationString:[ var='A' labels={project_id=ecp-data-2rusqe} value=3 ], [ var='B' labels={project_id=ecp-data-2rusqe} value=3 ], [ var='C' labels={project_id=ecp-data-2rusqe} value=0 ]} {Instance:project_id=ecp-data-2xdiue State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2xdiue Value:0xc01a1fdf80} B:{Var:B Labels:project_id=ecp-data-2xdiue Value:0xc01a1fdf88} C:{Var:C Labels:project_id=ecp-data-2xdiue Value:0xc01a1fdfd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980553587s EvaluationString:[ var='A' labels={project_id=ecp-data-2xdiue} value=0 ], [ var='B' labels={project_id=ecp-data-2xdiue} value=0 ], [ var='C' labels={project_id=ecp-data-2xdiue} value=0 ]} {Instance:project_id=ecp-data-2ybcet State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2ybcet Value:0xc00e554020} B:{Var:B Labels:project_id=ecp-data-2ybcet Value:0xc00e554028} C:{Var:C Labels:project_id=ecp-data-2ybcet Value:0xc00e554070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980561858s EvaluationString:[ var='A' labels={project_id=ecp-data-2ybcet} value=0 ], [ var='B' labels={project_id=ecp-data-2ybcet} value=0 ], [ var='C' labels={project_id=ecp-data-2ybcet} value=0 ]} {Instance:project_id=ecp-data-2z1zkw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-2z1zkw Value:0xc00e5540d0} B:{Var:B Labels:project_id=ecp-data-2z1zkw Value:0xc00e5540d8} C:{Var:C Labels:project_id=ecp-data-2z1zkw Value:0xc00e554120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980569034s EvaluationString:[ var='A' labels={project_id=ecp-data-2z1zkw} value=0 ], [ var='B' labels={project_id=ecp-data-2z1zkw} value=0 ], [ var='C' labels={project_id=ecp-data-2z1zkw} value=0 ]} {Instance:project_id=ecp-data-30umnk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-30umnk Value:0xc00e5541d0} B:{Var:B Labels:project_id=ecp-data-30umnk Value:0xc00e554170} C:{Var:C Labels:project_id=ecp-data-30umnk Value:0xc00e554178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980575647s EvaluationString:[ var='A' labels={project_id=ecp-data-30umnk} value=0 ], [ var='B' labels={project_id=ecp-data-30umnk} value=0 ], [ var='C' labels={project_id=ecp-data-30umnk} value=0 ]} {Instance:project_id=ecp-data-315vge State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-315vge Value:0xc00e554260} B:{Var:B Labels:project_id=ecp-data-315vge Value:0xc00e554268} C:{Var:C Labels:project_id=ecp-data-315vge Value:0xc00e5542b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.9805828s EvaluationString:[ var='A' labels={project_id=ecp-data-315vge} value=33 ], [ var='B' labels={project_id=ecp-data-315vge} value=33 ], [ var='C' labels={project_id=ecp-data-315vge} value=0 ]} {Instance:project_id=ecp-data-32gjco State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-32gjco Value:0xc00e554360} B:{Var:B Labels:project_id=ecp-data-32gjco Value:0xc00e554300} C:{Var:C Labels:project_id=ecp-data-32gjco Value:0xc00e554308}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980620657s 
EvaluationString:[ var='A' labels={project_id=ecp-data-32gjco} value=0 ], [ var='B' labels={project_id=ecp-data-32gjco} value=0 ], [ var='C' labels={project_id=ecp-data-32gjco} value=0 ]} {Instance:project_id=ecp-data-33t2h8 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-33t2h8 Value:0xc00e554410} B:{Var:B Labels:project_id=ecp-data-33t2h8 Value:0xc00e5543b0} C:{Var:C Labels:project_id=ecp-data-33t2h8 Value:0xc00e5543b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980635467s EvaluationString:[ var='A' labels={project_id=ecp-data-33t2h8} value=2 ], [ var='B' labels={project_id=ecp-data-33t2h8} value=2 ], [ var='C' labels={project_id=ecp-data-33t2h8} value=0 ]} {Instance:project_id=ecp-data-36u5wl State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-36u5wl Value:0xc00e554460} B:{Var:B Labels:project_id=ecp-data-36u5wl Value:0xc00e554468} C:{Var:C Labels:project_id=ecp-data-36u5wl Value:0xc00e5544b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980641807s EvaluationString:[ var='A' labels={project_id=ecp-data-36u5wl} value=0 ], [ var='B' labels={project_id=ecp-data-36u5wl} value=0 ], [ var='C' labels={project_id=ecp-data-36u5wl} value=0 ]} {Instance:project_id=ecp-data-37s4xq State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-37s4xq Value:0xc00e554518} B:{Var:B Labels:project_id=ecp-data-37s4xq Value:0xc00e554560} C:{Var:C Labels:project_id=ecp-data-37s4xq Value:0xc00e554510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980650248s EvaluationString:[ var='A' labels={project_id=ecp-data-37s4xq} value=0 ], [ var='B' labels={project_id=ecp-data-37s4xq} value=0 ], [ var='C' labels={project_id=ecp-data-37s4xq} value=0 ]} {Instance:project_id=ecp-data-3amvzg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3amvzg Value:0xc00e5545b8} B:{Var:B Labels:project_id=ecp-data-3amvzg Value:0xc00e554610} C:{Var:C Labels:project_id=ecp-data-3amvzg Value:0xc00e5545b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980656481s EvaluationString:[ var='A' labels={project_id=ecp-data-3amvzg} value=1 ], [ var='B' labels={project_id=ecp-data-3amvzg} value=1 ], [ var='C' labels={project_id=ecp-data-3amvzg} value=0 ]} {Instance:project_id=ecp-data-3aqwev State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3aqwev Value:0xc00e554660} B:{Var:B Labels:project_id=ecp-data-3aqwev Value:0xc00e554668} C:{Var:C Labels:project_id=ecp-data-3aqwev Value:0xc00e5546b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980663622s EvaluationString:[ var='A' labels={project_id=ecp-data-3aqwev} value=0 ], [ var='B' labels={project_id=ecp-data-3aqwev} value=0 ], [ var='C' labels={project_id=ecp-data-3aqwev} value=0 ]} {Instance:project_id=ecp-data-3at1b1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3at1b1 Value:0xc00e554710} B:{Var:B Labels:project_id=ecp-data-3at1b1 Value:0xc00e554718} C:{Var:C Labels:project_id=ecp-data-3at1b1 Value:0xc00e554760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980671532s EvaluationString:[ var='A' labels={project_id=ecp-data-3at1b1} value=0 ], [ var='B' labels={project_id=ecp-data-3at1b1} value=0 ], [ var='C' labels={project_id=ecp-data-3at1b1} value=0 ]} {Instance:project_id=ecp-data-3b13sa State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3b13sa Value:0xc00e5547b0} B:{Var:B 
Labels:project_id=ecp-data-3b13sa Value:0xc00e5547b8} C:{Var:C Labels:project_id=ecp-data-3b13sa Value:0xc00e554810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980678396s EvaluationString:[ var='A' labels={project_id=ecp-data-3b13sa} value=0 ], [ var='B' labels={project_id=ecp-data-3b13sa} value=0 ], [ var='C' labels={project_id=ecp-data-3b13sa} value=0 ]} {Instance:project_id=ecp-data-3fwyty State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3fwyty Value:0xc00e554860} B:{Var:B Labels:project_id=ecp-data-3fwyty Value:0xc00e554868} C:{Var:C Labels:project_id=ecp-data-3fwyty Value:0xc00e5548b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980684624s EvaluationString:[ var='A' labels={project_id=ecp-data-3fwyty} value=1 ], [ var='B' labels={project_id=ecp-data-3fwyty} value=1 ], [ var='C' labels={project_id=ecp-data-3fwyty} value=0 ]} {Instance:project_id=ecp-data-3j2aa1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3j2aa1 Value:0xc00e554910} B:{Var:B Labels:project_id=ecp-data-3j2aa1 Value:0xc00e554918} C:{Var:C Labels:project_id=ecp-data-3j2aa1 Value:0xc00e554960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980693264s EvaluationString:[ var='A' labels={project_id=ecp-data-3j2aa1} value=0 ], [ var='B' labels={project_id=ecp-data-3j2aa1} value=0 ], [ var='C' labels={project_id=ecp-data-3j2aa1} value=0 ]} {Instance:project_id=ecp-data-3jzxds State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3jzxds Value:0xc00e5549b0} B:{Var:B Labels:project_id=ecp-data-3jzxds Value:0xc00e5549b8} C:{Var:C Labels:project_id=ecp-data-3jzxds Value:0xc00e554a10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980700277s EvaluationString:[ var='A' labels={project_id=ecp-data-3jzxds} value=0 ], [ var='B' labels={project_id=ecp-data-3jzxds} value=0 ], [ var='C' labels={project_id=ecp-data-3jzxds} value=0 ]} {Instance:project_id=ecp-data-3llg4d State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3llg4d Value:0xc00e554ab0} B:{Var:B Labels:project_id=ecp-data-3llg4d Value:0xc00e554a60} C:{Var:C Labels:project_id=ecp-data-3llg4d Value:0xc00e554a68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980709297s EvaluationString:[ var='A' labels={project_id=ecp-data-3llg4d} value=0 ], [ var='B' labels={project_id=ecp-data-3llg4d} value=0 ], [ var='C' labels={project_id=ecp-data-3llg4d} value=0 ]} {Instance:project_id=ecp-data-3r7jn2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3r7jn2 Value:0xc00e554b00} B:{Var:B Labels:project_id=ecp-data-3r7jn2 Value:0xc00e554b08} C:{Var:C Labels:project_id=ecp-data-3r7jn2 Value:0xc00e554b60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980715555s EvaluationString:[ var='A' labels={project_id=ecp-data-3r7jn2} value=0 ], [ var='B' labels={project_id=ecp-data-3r7jn2} value=0 ], [ var='C' labels={project_id=ecp-data-3r7jn2} value=0 ]} {Instance:project_id=ecp-data-3swgrz State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3swgrz Value:0xc00e554bb0} B:{Var:B Labels:project_id=ecp-data-3swgrz Value:0xc00e554bb8} C:{Var:C Labels:project_id=ecp-data-3swgrz Value:0xc00e554c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980722125s EvaluationString:[ var='A' labels={project_id=ecp-data-3swgrz} value=0 ], [ var='B' labels={project_id=ecp-data-3swgrz} value=0 ], [ var='C' 
labels={project_id=ecp-data-3swgrz} value=0 ]} {Instance:project_id=ecp-data-3w9sg7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3w9sg7 Value:0xc00e554ca0} B:{Var:B Labels:project_id=ecp-data-3w9sg7 Value:0xc00e554c50} C:{Var:C Labels:project_id=ecp-data-3w9sg7 Value:0xc00e554c58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980728842s EvaluationString:[ var='A' labels={project_id=ecp-data-3w9sg7} value=0 ], [ var='B' labels={project_id=ecp-data-3w9sg7} value=0 ], [ var='C' labels={project_id=ecp-data-3w9sg7} value=0 ]} {Instance:project_id=ecp-data-3x2vri State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-3x2vri Value:0xc00e554d00} B:{Var:B Labels:project_id=ecp-data-3x2vri Value:0xc00e554d08} C:{Var:C Labels:project_id=ecp-data-3x2vri Value:0xc00e554d50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980736022s EvaluationString:[ var='A' labels={project_id=ecp-data-3x2vri} value=0 ], [ var='B' labels={project_id=ecp-data-3x2vri} value=0 ], [ var='C' labels={project_id=ecp-data-3x2vri} value=0 ]} {Instance:project_id=ecp-data-4103of State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4103of Value:0xc00e554da0} B:{Var:B Labels:project_id=ecp-data-4103of Value:0xc00e554da8} C:{Var:C Labels:project_id=ecp-data-4103of Value:0xc00e554df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98076859s EvaluationString:[ var='A' labels={project_id=ecp-data-4103of} value=0 ], [ var='B' labels={project_id=ecp-data-4103of} value=0 ], [ var='C' labels={project_id=ecp-data-4103of} value=0 ]} {Instance:project_id=ecp-data-48tqv0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-48tqv0 Value:0xc00e554e48} B:{Var:B Labels:project_id=ecp-data-48tqv0 Value:0xc00e554ea0} C:{Var:C Labels:project_id=ecp-data-48tqv0 Value:0xc00e554e40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980775547s EvaluationString:[ var='A' labels={project_id=ecp-data-48tqv0} value=2 ], [ var='B' labels={project_id=ecp-data-48tqv0} value=2 ], [ var='C' labels={project_id=ecp-data-48tqv0} value=0 ]} {Instance:project_id=ecp-data-4auczu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4auczu Value:0xc00e554ef0} B:{Var:B Labels:project_id=ecp-data-4auczu Value:0xc00e554ef8} C:{Var:C Labels:project_id=ecp-data-4auczu Value:0xc00e554f40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98078198s EvaluationString:[ var='A' labels={project_id=ecp-data-4auczu} value=0 ], [ var='B' labels={project_id=ecp-data-4auczu} value=0 ], [ var='C' labels={project_id=ecp-data-4auczu} value=0 ]} {Instance:project_id=ecp-data-4bkrtj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4bkrtj Value:0xc00e554fe0} B:{Var:B Labels:project_id=ecp-data-4bkrtj Value:0xc00e554f90} C:{Var:C Labels:project_id=ecp-data-4bkrtj Value:0xc00e554f98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98078818s EvaluationString:[ var='A' labels={project_id=ecp-data-4bkrtj} value=0 ], [ var='B' labels={project_id=ecp-data-4bkrtj} value=0 ], [ var='C' labels={project_id=ecp-data-4bkrtj} value=0 ]} {Instance:project_id=ecp-data-4cbo7c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4cbo7c Value:0xc00e555090} B:{Var:B Labels:project_id=ecp-data-4cbo7c Value:0xc00e555030} C:{Var:C Labels:project_id=ecp-data-4cbo7c Value:0xc00e555038}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:3.980795054s EvaluationString:[ var='A' labels={project_id=ecp-data-4cbo7c} value=1 ], [ var='B' labels={project_id=ecp-data-4cbo7c} value=1 ], [ var='C' labels={project_id=ecp-data-4cbo7c} value=0 ]} {Instance:project_id=ecp-data-4d70zq State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4d70zq Value:0xc00e5550e0} B:{Var:B Labels:project_id=ecp-data-4d70zq Value:0xc00e5550e8} C:{Var:C Labels:project_id=ecp-data-4d70zq Value:0xc00e555130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980801754s EvaluationString:[ var='A' labels={project_id=ecp-data-4d70zq} value=9 ], [ var='B' labels={project_id=ecp-data-4d70zq} value=9 ], [ var='C' labels={project_id=ecp-data-4d70zq} value=0 ]} {Instance:project_id=ecp-data-4j0xh2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4j0xh2 Value:0xc00e555198} B:{Var:B Labels:project_id=ecp-data-4j0xh2 Value:0xc00e5551e0} C:{Var:C Labels:project_id=ecp-data-4j0xh2 Value:0xc00e555190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980808875s EvaluationString:[ var='A' labels={project_id=ecp-data-4j0xh2} value=0 ], [ var='B' labels={project_id=ecp-data-4j0xh2} value=0 ], [ var='C' labels={project_id=ecp-data-4j0xh2} value=0 ]} {Instance:project_id=ecp-data-4k7lbf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4k7lbf Value:0xc00e555238} B:{Var:B Labels:project_id=ecp-data-4k7lbf Value:0xc00e555290} C:{Var:C Labels:project_id=ecp-data-4k7lbf Value:0xc00e555230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980815457s EvaluationString:[ var='A' labels={project_id=ecp-data-4k7lbf} value=0 ], [ var='B' labels={project_id=ecp-data-4k7lbf} value=0 ], [ var='C' labels={project_id=ecp-data-4k7lbf} value=0 ]} {Instance:project_id=ecp-data-4kgnlv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4kgnlv Value:0xc00e5552e0} B:{Var:B Labels:project_id=ecp-data-4kgnlv Value:0xc00e5552e8} C:{Var:C Labels:project_id=ecp-data-4kgnlv Value:0xc00e555330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980821448s EvaluationString:[ var='A' labels={project_id=ecp-data-4kgnlv} value=12 ], [ var='B' labels={project_id=ecp-data-4kgnlv} value=12 ], [ var='C' labels={project_id=ecp-data-4kgnlv} value=0 ]} {Instance:project_id=ecp-data-4nya0b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4nya0b Value:0xc00e555390} B:{Var:B Labels:project_id=ecp-data-4nya0b Value:0xc00e555398} C:{Var:C Labels:project_id=ecp-data-4nya0b Value:0xc00e5553e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980827632s EvaluationString:[ var='A' labels={project_id=ecp-data-4nya0b} value=0 ], [ var='B' labels={project_id=ecp-data-4nya0b} value=0 ], [ var='C' labels={project_id=ecp-data-4nya0b} value=0 ]} {Instance:project_id=ecp-data-4ok7uv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4ok7uv Value:0xc00e555440} B:{Var:B Labels:project_id=ecp-data-4ok7uv Value:0xc00e555448} C:{Var:C Labels:project_id=ecp-data-4ok7uv Value:0xc00e555490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980835328s EvaluationString:[ var='A' labels={project_id=ecp-data-4ok7uv} value=0 ], [ var='B' labels={project_id=ecp-data-4ok7uv} value=0 ], [ var='C' labels={project_id=ecp-data-4ok7uv} value=0 ]} {Instance:project_id=ecp-data-4okvmm State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:project_id=ecp-data-4okvmm Value:0xc00e5554e0} B:{Var:B Labels:project_id=ecp-data-4okvmm Value:0xc00e5554e8} C:{Var:C Labels:project_id=ecp-data-4okvmm Value:0xc00e555530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980842587s EvaluationString:[ var='A' labels={project_id=ecp-data-4okvmm} value=0 ], [ var='B' labels={project_id=ecp-data-4okvmm} value=0 ], [ var='C' labels={project_id=ecp-data-4okvmm} value=0 ]} {Instance:project_id=ecp-data-4oz6yv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4oz6yv Value:0xc00e5555e0} B:{Var:B Labels:project_id=ecp-data-4oz6yv Value:0xc00e555590} C:{Var:C Labels:project_id=ecp-data-4oz6yv Value:0xc00e555598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980850007s EvaluationString:[ var='A' labels={project_id=ecp-data-4oz6yv} value=0 ], [ var='B' labels={project_id=ecp-data-4oz6yv} value=0 ], [ var='C' labels={project_id=ecp-data-4oz6yv} value=0 ]} {Instance:project_id=ecp-data-4tvvux State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4tvvux Value:0xc00e555690} B:{Var:B Labels:project_id=ecp-data-4tvvux Value:0xc00e555630} C:{Var:C Labels:project_id=ecp-data-4tvvux Value:0xc00e555638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980857902s EvaluationString:[ var='A' labels={project_id=ecp-data-4tvvux} value=1 ], [ var='B' labels={project_id=ecp-data-4tvvux} value=1 ], [ var='C' labels={project_id=ecp-data-4tvvux} value=0 ]} {Instance:project_id=ecp-data-4ymvc0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4ymvc0 Value:0xc00e5556e8} B:{Var:B Labels:project_id=ecp-data-4ymvc0 Value:0xc00e555730} C:{Var:C Labels:project_id=ecp-data-4ymvc0 Value:0xc00e5556e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980865655s EvaluationString:[ var='A' labels={project_id=ecp-data-4ymvc0} value=0 ], [ var='B' labels={project_id=ecp-data-4ymvc0} value=0 ], [ var='C' labels={project_id=ecp-data-4ymvc0} value=0 ]} {Instance:project_id=ecp-data-4zrdx4 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-4zrdx4 Value:0xc00e555780} B:{Var:B Labels:project_id=ecp-data-4zrdx4 Value:0xc00e555788} C:{Var:C Labels:project_id=ecp-data-4zrdx4 Value:0xc00e5557e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980872807s EvaluationString:[ var='A' labels={project_id=ecp-data-4zrdx4} value=2 ], [ var='B' labels={project_id=ecp-data-4zrdx4} value=2 ], [ var='C' labels={project_id=ecp-data-4zrdx4} value=0 ]} {Instance:project_id=ecp-data-58fb4h State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-58fb4h Value:0xc00e555830} B:{Var:B Labels:project_id=ecp-data-58fb4h Value:0xc00e555838} C:{Var:C Labels:project_id=ecp-data-58fb4h Value:0xc00e555880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980880431s EvaluationString:[ var='A' labels={project_id=ecp-data-58fb4h} value=1 ], [ var='B' labels={project_id=ecp-data-58fb4h} value=1 ], [ var='C' labels={project_id=ecp-data-58fb4h} value=0 ]} {Instance:project_id=ecp-data-59gd1m State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-59gd1m Value:0xc00e5558e0} B:{Var:B Labels:project_id=ecp-data-59gd1m Value:0xc00e5558e8} C:{Var:C Labels:project_id=ecp-data-59gd1m Value:0xc00e555930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980888072s EvaluationString:[ var='A' labels={project_id=ecp-data-59gd1m} value=1 ], [ var='B' 
labels={project_id=ecp-data-59gd1m} value=1 ], [ var='C' labels={project_id=ecp-data-59gd1m} value=0 ]} {Instance:project_id=ecp-data-5ai94k State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5ai94k Value:0xc00e555980} B:{Var:B Labels:project_id=ecp-data-5ai94k Value:0xc00e555988} C:{Var:C Labels:project_id=ecp-data-5ai94k Value:0xc00e5559e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980894973s EvaluationString:[ var='A' labels={project_id=ecp-data-5ai94k} value=1 ], [ var='B' labels={project_id=ecp-data-5ai94k} value=1 ], [ var='C' labels={project_id=ecp-data-5ai94k} value=0 ]} {Instance:project_id=ecp-data-5fpktp State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5fpktp Value:0xc00e555a30} B:{Var:B Labels:project_id=ecp-data-5fpktp Value:0xc00e555a38} C:{Var:C Labels:project_id=ecp-data-5fpktp Value:0xc00e555a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980903583s EvaluationString:[ var='A' labels={project_id=ecp-data-5fpktp} value=0 ], [ var='B' labels={project_id=ecp-data-5fpktp} value=0 ], [ var='C' labels={project_id=ecp-data-5fpktp} value=0 ]} {Instance:project_id=ecp-data-5ig24c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5ig24c Value:0xc00e555b30} B:{Var:B Labels:project_id=ecp-data-5ig24c Value:0xc00e555ae0} C:{Var:C Labels:project_id=ecp-data-5ig24c Value:0xc00e555ae8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980909686s EvaluationString:[ var='A' labels={project_id=ecp-data-5ig24c} value=6 ], [ var='B' labels={project_id=ecp-data-5ig24c} value=6 ], [ var='C' labels={project_id=ecp-data-5ig24c} value=0 ]} {Instance:project_id=ecp-data-5mprvs State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5mprvs Value:0xc00e555b80} B:{Var:B Labels:project_id=ecp-data-5mprvs Value:0xc00e555b88} C:{Var:C Labels:project_id=ecp-data-5mprvs Value:0xc00e555bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980916988s EvaluationString:[ var='A' labels={project_id=ecp-data-5mprvs} value=0 ], [ var='B' labels={project_id=ecp-data-5mprvs} value=0 ], [ var='C' labels={project_id=ecp-data-5mprvs} value=0 ]} {Instance:project_id=ecp-data-5onoz1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5onoz1 Value:0xc00e555c30} B:{Var:B Labels:project_id=ecp-data-5onoz1 Value:0xc00e555c38} C:{Var:C Labels:project_id=ecp-data-5onoz1 Value:0xc00e555c80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98092677s EvaluationString:[ var='A' labels={project_id=ecp-data-5onoz1} value=0 ], [ var='B' labels={project_id=ecp-data-5onoz1} value=0 ], [ var='C' labels={project_id=ecp-data-5onoz1} value=0 ]} {Instance:project_id=ecp-data-5oxepi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5oxepi Value:0xc00e555cd0} B:{Var:B Labels:project_id=ecp-data-5oxepi Value:0xc00e555cd8} C:{Var:C Labels:project_id=ecp-data-5oxepi Value:0xc00e555d30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980933814s EvaluationString:[ var='A' labels={project_id=ecp-data-5oxepi} value=3 ], [ var='B' labels={project_id=ecp-data-5oxepi} value=3 ], [ var='C' labels={project_id=ecp-data-5oxepi} value=0 ]} {Instance:project_id=ecp-data-5quizx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5quizx Value:0xc00e555d80} B:{Var:B Labels:project_id=ecp-data-5quizx Value:0xc00e555d88} C:{Var:C 
Labels:project_id=ecp-data-5quizx Value:0xc00e555dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980940837s EvaluationString:[ var='A' labels={project_id=ecp-data-5quizx} value=42 ], [ var='B' labels={project_id=ecp-data-5quizx} value=42 ], [ var='C' labels={project_id=ecp-data-5quizx} value=0 ]} {Instance:project_id=ecp-data-5rehuq State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5rehuq Value:0xc00e555e30} B:{Var:B Labels:project_id=ecp-data-5rehuq Value:0xc00e555e38} C:{Var:C Labels:project_id=ecp-data-5rehuq Value:0xc00e555e80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980948519s EvaluationString:[ var='A' labels={project_id=ecp-data-5rehuq} value=13 ], [ var='B' labels={project_id=ecp-data-5rehuq} value=13 ], [ var='C' labels={project_id=ecp-data-5rehuq} value=0 ]} {Instance:project_id=ecp-data-5suwa6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5suwa6 Value:0xc00e555ed8} B:{Var:B Labels:project_id=ecp-data-5suwa6 Value:0xc00e555f20} C:{Var:C Labels:project_id=ecp-data-5suwa6 Value:0xc00e555ed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980955494s EvaluationString:[ var='A' labels={project_id=ecp-data-5suwa6} value=5 ], [ var='B' labels={project_id=ecp-data-5suwa6} value=5 ], [ var='C' labels={project_id=ecp-data-5suwa6} value=0 ]} {Instance:project_id=ecp-data-5uwx4p State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5uwx4p Value:0xc00e555f80} B:{Var:B Labels:project_id=ecp-data-5uwx4p Value:0xc00e555f88} C:{Var:C Labels:project_id=ecp-data-5uwx4p Value:0xc00e555fd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980969788s EvaluationString:[ var='A' labels={project_id=ecp-data-5uwx4p} value=24 ], [ var='B' labels={project_id=ecp-data-5uwx4p} value=24 ], [ var='C' labels={project_id=ecp-data-5uwx4p} value=0 ]} {Instance:project_id=ecp-data-5xevow State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5xevow Value:0xc023650020} B:{Var:B Labels:project_id=ecp-data-5xevow Value:0xc023650028} C:{Var:C Labels:project_id=ecp-data-5xevow Value:0xc023650070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980976659s EvaluationString:[ var='A' labels={project_id=ecp-data-5xevow} value=1 ], [ var='B' labels={project_id=ecp-data-5xevow} value=1 ], [ var='C' labels={project_id=ecp-data-5xevow} value=0 ]} {Instance:project_id=ecp-data-5xykde State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5xykde Value:0xc0236500c8} B:{Var:B Labels:project_id=ecp-data-5xykde Value:0xc023650110} C:{Var:C Labels:project_id=ecp-data-5xykde Value:0xc0236500c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980982997s EvaluationString:[ var='A' labels={project_id=ecp-data-5xykde} value=0 ], [ var='B' labels={project_id=ecp-data-5xykde} value=0 ], [ var='C' labels={project_id=ecp-data-5xykde} value=0 ]} {Instance:project_id=ecp-data-5yyk5r State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-5yyk5r Value:0xc0236501b0} B:{Var:B Labels:project_id=ecp-data-5yyk5r Value:0xc023650160} C:{Var:C Labels:project_id=ecp-data-5yyk5r Value:0xc023650168}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980990877s EvaluationString:[ var='A' labels={project_id=ecp-data-5yyk5r} value=2 ], [ var='B' labels={project_id=ecp-data-5yyk5r} value=2 ], [ var='C' labels={project_id=ecp-data-5yyk5r} value=0 ]} 
{Instance:project_id=ecp-data-5z9mqk State:Normal Error: Results:map[] Values:map[A:… B:… C:…] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.980997098s EvaluationString:[ var='A' labels={project_id=ecp-data-5z9mqk} value=0 ], [ var='B' labels={project_id=ecp-data-5z9mqk} value=0 ], [ var='C' labels={project_id=ecp-data-5z9mqk} value=0 ]}
… [log dump trimmed: hundreds of further alert-instance entries of the same shape, one per project_id, all State:Normal with empty Error and Results fields, EvaluatedAt:2024-05-29 13:44:10 +0000 UTC, EvaluationDuration ≈3.98 s, each reporting matching values for vars A and B and value 0 for var C] …
Labels:project_id=ecp-data-dtgijd Value:0xc01cf45e50} B:{Var:B Labels:project_id=ecp-data-dtgijd Value:0xc01cf45e58} C:{Var:C Labels:project_id=ecp-data-dtgijd Value:0xc01cf45ea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982020783s EvaluationString:[ var='A' labels={project_id=ecp-data-dtgijd} value=0 ], [ var='B' labels={project_id=ecp-data-dtgijd} value=0 ], [ var='C' labels={project_id=ecp-data-dtgijd} value=0 ]} {Instance:project_id=ecp-data-dttyyg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-dttyyg Value:0xc01cf45ef0} B:{Var:B Labels:project_id=ecp-data-dttyyg Value:0xc01cf45ef8} C:{Var:C Labels:project_id=ecp-data-dttyyg Value:0xc01cf45f40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982027528s EvaluationString:[ var='A' labels={project_id=ecp-data-dttyyg} value=3 ], [ var='B' labels={project_id=ecp-data-dttyyg} value=3 ], [ var='C' labels={project_id=ecp-data-dttyyg} value=0 ]} {Instance:project_id=ecp-data-dw95kk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-dw95kk Value:0xc02165e050} B:{Var:B Labels:project_id=ecp-data-dw95kk Value:0xc01cf45fe0} C:{Var:C Labels:project_id=ecp-data-dw95kk Value:0xc01cf45fe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982034796s EvaluationString:[ var='A' labels={project_id=ecp-data-dw95kk} value=1 ], [ var='B' labels={project_id=ecp-data-dw95kk} value=1 ], [ var='C' labels={project_id=ecp-data-dw95kk} value=0 ]} {Instance:project_id=ecp-data-e3ce53 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-e3ce53 Value:0xc02165e0a8} B:{Var:B Labels:project_id=ecp-data-e3ce53 Value:0xc02165e0f0} C:{Var:C Labels:project_id=ecp-data-e3ce53 Value:0xc02165e0a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98204328s EvaluationString:[ var='A' labels={project_id=ecp-data-e3ce53} value=4 ], [ var='B' labels={project_id=ecp-data-e3ce53} value=4 ], [ var='C' labels={project_id=ecp-data-e3ce53} value=0 ]} {Instance:project_id=ecp-data-ebis36 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ebis36 Value:0xc02165e1b0} B:{Var:B Labels:project_id=ecp-data-ebis36 Value:0xc02165e1b8} C:{Var:C Labels:project_id=ecp-data-ebis36 Value:0xc02165e200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98205146s EvaluationString:[ var='A' labels={project_id=ecp-data-ebis36} value=1 ], [ var='B' labels={project_id=ecp-data-ebis36} value=1 ], [ var='C' labels={project_id=ecp-data-ebis36} value=0 ]} {Instance:project_id=ecp-data-ed8uiz State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ed8uiz Value:0xc02165e258} B:{Var:B Labels:project_id=ecp-data-ed8uiz Value:0xc02165e320} C:{Var:C Labels:project_id=ecp-data-ed8uiz Value:0xc02165e250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98205847s EvaluationString:[ var='A' labels={project_id=ecp-data-ed8uiz} value=2 ], [ var='B' labels={project_id=ecp-data-ed8uiz} value=2 ], [ var='C' labels={project_id=ecp-data-ed8uiz} value=0 ]} {Instance:project_id=ecp-data-edxpd2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-edxpd2 Value:0xc02165e3c0} B:{Var:B Labels:project_id=ecp-data-edxpd2 Value:0xc02165e370} C:{Var:C Labels:project_id=ecp-data-edxpd2 Value:0xc02165e378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982064585s EvaluationString:[ var='A' labels={project_id=ecp-data-edxpd2} value=0 ], [ var='B' 
labels={project_id=ecp-data-edxpd2} value=0 ], [ var='C' labels={project_id=ecp-data-edxpd2} value=0 ]} {Instance:project_id=ecp-data-een1sl State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-een1sl Value:0xc02165e4e0} B:{Var:B Labels:project_id=ecp-data-een1sl Value:0xc02165e490} C:{Var:C Labels:project_id=ecp-data-een1sl Value:0xc02165e498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982073328s EvaluationString:[ var='A' labels={project_id=ecp-data-een1sl} value=0 ], [ var='B' labels={project_id=ecp-data-een1sl} value=0 ], [ var='C' labels={project_id=ecp-data-een1sl} value=0 ]} {Instance:project_id=ecp-data-eg66xu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-eg66xu Value:0xc02165e538} B:{Var:B Labels:project_id=ecp-data-eg66xu Value:0xc02165e600} C:{Var:C Labels:project_id=ecp-data-eg66xu Value:0xc02165e530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982080152s EvaluationString:[ var='A' labels={project_id=ecp-data-eg66xu} value=1 ], [ var='B' labels={project_id=ecp-data-eg66xu} value=1 ], [ var='C' labels={project_id=ecp-data-eg66xu} value=0 ]} {Instance:project_id=ecp-data-ejcgnk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ejcgnk Value:0xc02165e658} B:{Var:B Labels:project_id=ecp-data-ejcgnk Value:0xc02165e6a0} C:{Var:C Labels:project_id=ecp-data-ejcgnk Value:0xc02165e650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982087122s EvaluationString:[ var='A' labels={project_id=ecp-data-ejcgnk} value=0 ], [ var='B' labels={project_id=ecp-data-ejcgnk} value=0 ], [ var='C' labels={project_id=ecp-data-ejcgnk} value=0 ]} {Instance:project_id=ecp-data-ekcfvi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ekcfvi Value:0xc02165e770} B:{Var:B Labels:project_id=ecp-data-ekcfvi Value:0xc02165e778} C:{Var:C Labels:project_id=ecp-data-ekcfvi Value:0xc02165e7c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982094003s EvaluationString:[ var='A' labels={project_id=ecp-data-ekcfvi} value=0 ], [ var='B' labels={project_id=ecp-data-ekcfvi} value=0 ], [ var='C' labels={project_id=ecp-data-ekcfvi} value=0 ]} {Instance:project_id=ecp-data-eknlmx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-eknlmx Value:0xc02165e810} B:{Var:B Labels:project_id=ecp-data-eknlmx Value:0xc02165e818} C:{Var:C Labels:project_id=ecp-data-eknlmx Value:0xc02165e8d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982100703s EvaluationString:[ var='A' labels={project_id=ecp-data-eknlmx} value=0 ], [ var='B' labels={project_id=ecp-data-eknlmx} value=0 ], [ var='C' labels={project_id=ecp-data-eknlmx} value=0 ]} {Instance:project_id=ecp-data-eowkh8 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-eowkh8 Value:0xc02165e970} B:{Var:B Labels:project_id=ecp-data-eowkh8 Value:0xc02165e920} C:{Var:C Labels:project_id=ecp-data-eowkh8 Value:0xc02165e928}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982125392s EvaluationString:[ var='A' labels={project_id=ecp-data-eowkh8} value=0 ], [ var='B' labels={project_id=ecp-data-eowkh8} value=0 ], [ var='C' labels={project_id=ecp-data-eowkh8} value=0 ]} {Instance:project_id=ecp-data-eqx4pg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-eqx4pg Value:0xc02165e9c0} B:{Var:B Labels:project_id=ecp-data-eqx4pg Value:0xc02165e9c8} C:{Var:C 
Labels:project_id=ecp-data-eqx4pg Value:0xc02165ea10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982131804s EvaluationString:[ var='A' labels={project_id=ecp-data-eqx4pg} value=0 ], [ var='B' labels={project_id=ecp-data-eqx4pg} value=0 ], [ var='C' labels={project_id=ecp-data-eqx4pg} value=0 ]} {Instance:project_id=ecp-data-eqxnmj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-eqxnmj Value:0xc02165ea68} B:{Var:B Labels:project_id=ecp-data-eqxnmj Value:0xc02165eab0} C:{Var:C Labels:project_id=ecp-data-eqxnmj Value:0xc02165ea60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982170855s EvaluationString:[ var='A' labels={project_id=ecp-data-eqxnmj} value=0 ], [ var='B' labels={project_id=ecp-data-eqxnmj} value=0 ], [ var='C' labels={project_id=ecp-data-eqxnmj} value=0 ]} {Instance:project_id=ecp-data-er7xgh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-er7xgh Value:0xc02165ebd0} B:{Var:B Labels:project_id=ecp-data-er7xgh Value:0xc02165eb00} C:{Var:C Labels:project_id=ecp-data-er7xgh Value:0xc02165eb08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982181089s EvaluationString:[ var='A' labels={project_id=ecp-data-er7xgh} value=0 ], [ var='B' labels={project_id=ecp-data-er7xgh} value=0 ], [ var='C' labels={project_id=ecp-data-er7xgh} value=0 ]} {Instance:project_id=ecp-data-erg4xv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-erg4xv Value:0xc02165ec28} B:{Var:B Labels:project_id=ecp-data-erg4xv Value:0xc02165ec70} C:{Var:C Labels:project_id=ecp-data-erg4xv Value:0xc02165ec20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982188883s EvaluationString:[ var='A' labels={project_id=ecp-data-erg4xv} value=2 ], [ var='B' labels={project_id=ecp-data-erg4xv} value=2 ], [ var='C' labels={project_id=ecp-data-erg4xv} value=0 ]} {Instance:project_id=ecp-data-esqung State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-esqung Value:0xc02165ed48} B:{Var:B Labels:project_id=ecp-data-esqung Value:0xc02165ed90} C:{Var:C Labels:project_id=ecp-data-esqung Value:0xc02165ed40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982195636s EvaluationString:[ var='A' labels={project_id=ecp-data-esqung} value=0 ], [ var='B' labels={project_id=ecp-data-esqung} value=0 ], [ var='C' labels={project_id=ecp-data-esqung} value=0 ]} {Instance:project_id=ecp-data-etjsa6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-etjsa6 Value:0xc02165ede0} B:{Var:B Labels:project_id=ecp-data-etjsa6 Value:0xc02165ede8} C:{Var:C Labels:project_id=ecp-data-etjsa6 Value:0xc02165eeb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982203283s EvaluationString:[ var='A' labels={project_id=ecp-data-etjsa6} value=0 ], [ var='B' labels={project_id=ecp-data-etjsa6} value=0 ], [ var='C' labels={project_id=ecp-data-etjsa6} value=0 ]} {Instance:project_id=ecp-data-etqexv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-etqexv Value:0xc02165ef50} B:{Var:B Labels:project_id=ecp-data-etqexv Value:0xc02165ef00} C:{Var:C Labels:project_id=ecp-data-etqexv Value:0xc02165ef08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982209653s EvaluationString:[ var='A' labels={project_id=ecp-data-etqexv} value=0 ], [ var='B' labels={project_id=ecp-data-etqexv} value=0 ], [ var='C' labels={project_id=ecp-data-etqexv} value=0 ]} 
{Instance:project_id=ecp-data-ezeoq9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ezeoq9 Value:0xc02165f028} B:{Var:B Labels:project_id=ecp-data-ezeoq9 Value:0xc02165f070} C:{Var:C Labels:project_id=ecp-data-ezeoq9 Value:0xc02165f020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982221093s EvaluationString:[ var='A' labels={project_id=ecp-data-ezeoq9} value=0 ], [ var='B' labels={project_id=ecp-data-ezeoq9} value=0 ], [ var='C' labels={project_id=ecp-data-ezeoq9} value=0 ]} {Instance:project_id=ecp-data-f0nv70 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-f0nv70 Value:0xc02165f0c0} B:{Var:B Labels:project_id=ecp-data-f0nv70 Value:0xc02165f0c8} C:{Var:C Labels:project_id=ecp-data-f0nv70 Value:0xc02165f180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98222781s EvaluationString:[ var='A' labels={project_id=ecp-data-f0nv70} value=0 ], [ var='B' labels={project_id=ecp-data-f0nv70} value=0 ], [ var='C' labels={project_id=ecp-data-f0nv70} value=0 ]} {Instance:project_id=ecp-data-f9ha95 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-f9ha95 Value:0xc02165f1d0} B:{Var:B Labels:project_id=ecp-data-f9ha95 Value:0xc02165f1d8} C:{Var:C Labels:project_id=ecp-data-f9ha95 Value:0xc02165f220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982235116s EvaluationString:[ var='A' labels={project_id=ecp-data-f9ha95} value=0 ], [ var='B' labels={project_id=ecp-data-f9ha95} value=0 ], [ var='C' labels={project_id=ecp-data-f9ha95} value=0 ]} {Instance:project_id=ecp-data-fa6jsd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fa6jsd Value:0xc02165f2f0} B:{Var:B Labels:project_id=ecp-data-fa6jsd Value:0xc02165f2f8} C:{Var:C Labels:project_id=ecp-data-fa6jsd Value:0xc02165f340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98224226s EvaluationString:[ var='A' labels={project_id=ecp-data-fa6jsd} value=5 ], [ var='B' labels={project_id=ecp-data-fa6jsd} value=5 ], [ var='C' labels={project_id=ecp-data-fa6jsd} value=0 ]} {Instance:project_id=ecp-data-ffa76b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ffa76b Value:0xc02165f398} B:{Var:B Labels:project_id=ecp-data-ffa76b Value:0xc02165f460} C:{Var:C Labels:project_id=ecp-data-ffa76b Value:0xc02165f390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.9822495s EvaluationString:[ var='A' labels={project_id=ecp-data-ffa76b} value=2 ], [ var='B' labels={project_id=ecp-data-ffa76b} value=2 ], [ var='C' labels={project_id=ecp-data-ffa76b} value=0 ]} {Instance:project_id=ecp-data-ffv8sx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ffv8sx Value:0xc02165f4b0} B:{Var:B Labels:project_id=ecp-data-ffv8sx Value:0xc02165f4b8} C:{Var:C Labels:project_id=ecp-data-ffv8sx Value:0xc02165f500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982256033s EvaluationString:[ var='A' labels={project_id=ecp-data-ffv8sx} value=2 ], [ var='B' labels={project_id=ecp-data-ffv8sx} value=2 ], [ var='C' labels={project_id=ecp-data-ffv8sx} value=0 ]} {Instance:project_id=ecp-data-fg1edn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fg1edn Value:0xc02165f5d0} B:{Var:B Labels:project_id=ecp-data-fg1edn Value:0xc02165f5d8} C:{Var:C Labels:project_id=ecp-data-fg1edn Value:0xc02165f620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982262265s 
EvaluationString:[ var='A' labels={project_id=ecp-data-fg1edn} value=0 ], [ var='B' labels={project_id=ecp-data-fg1edn} value=0 ], [ var='C' labels={project_id=ecp-data-fg1edn} value=0 ]} {Instance:project_id=ecp-data-fg46j8 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fg46j8 Value:0xc02165f670} B:{Var:B Labels:project_id=ecp-data-fg46j8 Value:0xc02165f678} C:{Var:C Labels:project_id=ecp-data-fg46j8 Value:0xc02165f6c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982268176s EvaluationString:[ var='A' labels={project_id=ecp-data-fg46j8} value=0 ], [ var='B' labels={project_id=ecp-data-fg46j8} value=0 ], [ var='C' labels={project_id=ecp-data-fg46j8} value=0 ]} {Instance:project_id=ecp-data-fh0h9p State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fh0h9p Value:0xc02165f7e0} B:{Var:B Labels:project_id=ecp-data-fh0h9p Value:0xc02165f790} C:{Var:C Labels:project_id=ecp-data-fh0h9p Value:0xc02165f798}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982277654s EvaluationString:[ var='A' labels={project_id=ecp-data-fh0h9p} value=0 ], [ var='B' labels={project_id=ecp-data-fh0h9p} value=0 ], [ var='C' labels={project_id=ecp-data-fh0h9p} value=0 ]} {Instance:project_id=ecp-data-fhi101 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fhi101 Value:0xc02165f880} B:{Var:B Labels:project_id=ecp-data-fhi101 Value:0xc02165f830} C:{Var:C Labels:project_id=ecp-data-fhi101 Value:0xc02165f838}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98228454s EvaluationString:[ var='A' labels={project_id=ecp-data-fhi101} value=7 ], [ var='B' labels={project_id=ecp-data-fhi101} value=7 ], [ var='C' labels={project_id=ecp-data-fhi101} value=0 ]} {Instance:project_id=ecp-data-fhpywf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fhpywf Value:0xc02165f8d0} B:{Var:B Labels:project_id=ecp-data-fhpywf Value:0xc02165f8d8} C:{Var:C Labels:project_id=ecp-data-fhpywf Value:0xc02165f920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982291631s EvaluationString:[ var='A' labels={project_id=ecp-data-fhpywf} value=16 ], [ var='B' labels={project_id=ecp-data-fhpywf} value=16 ], [ var='C' labels={project_id=ecp-data-fhpywf} value=0 ]} {Instance:project_id=ecp-data-fi0cfb State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fi0cfb Value:0xc02165f970} B:{Var:B Labels:project_id=ecp-data-fi0cfb Value:0xc02165f978} C:{Var:C Labels:project_id=ecp-data-fi0cfb Value:0xc02165fa40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982299733s EvaluationString:[ var='A' labels={project_id=ecp-data-fi0cfb} value=0 ], [ var='B' labels={project_id=ecp-data-fi0cfb} value=0 ], [ var='C' labels={project_id=ecp-data-fi0cfb} value=0 ]} {Instance:project_id=ecp-data-fk1d8q State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fk1d8q Value:0xc02165fae0} B:{Var:B Labels:project_id=ecp-data-fk1d8q Value:0xc02165fa90} C:{Var:C Labels:project_id=ecp-data-fk1d8q Value:0xc02165fa98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982306226s EvaluationString:[ var='A' labels={project_id=ecp-data-fk1d8q} value=478 ], [ var='B' labels={project_id=ecp-data-fk1d8q} value=478 ], [ var='C' labels={project_id=ecp-data-fk1d8q} value=0 ]} {Instance:project_id=ecp-data-fmerd7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fmerd7 Value:0xc02165fba0} B:{Var:B 
Labels:project_id=ecp-data-fmerd7 Value:0xc02165fba8} C:{Var:C Labels:project_id=ecp-data-fmerd7 Value:0xc02165fbf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982313916s EvaluationString:[ var='A' labels={project_id=ecp-data-fmerd7} value=23 ], [ var='B' labels={project_id=ecp-data-fmerd7} value=23 ], [ var='C' labels={project_id=ecp-data-fmerd7} value=0 ]} {Instance:project_id=ecp-data-fmswzh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fmswzh Value:0xc02165fc48} B:{Var:B Labels:project_id=ecp-data-fmswzh Value:0xc02165fd00} C:{Var:C Labels:project_id=ecp-data-fmswzh Value:0xc02165fc40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982322154s EvaluationString:[ var='A' labels={project_id=ecp-data-fmswzh} value=0 ], [ var='B' labels={project_id=ecp-data-fmswzh} value=0 ], [ var='C' labels={project_id=ecp-data-fmswzh} value=0 ]} {Instance:project_id=ecp-data-fowisp State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fowisp Value:0xc02165fda0} B:{Var:B Labels:project_id=ecp-data-fowisp Value:0xc02165fd50} C:{Var:C Labels:project_id=ecp-data-fowisp Value:0xc02165fd58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982329442s EvaluationString:[ var='A' labels={project_id=ecp-data-fowisp} value=0 ], [ var='B' labels={project_id=ecp-data-fowisp} value=0 ], [ var='C' labels={project_id=ecp-data-fowisp} value=0 ]} {Instance:project_id=ecp-data-fqq58z State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fqq58z Value:0xc02165fe70} B:{Var:B Labels:project_id=ecp-data-fqq58z Value:0xc02165fe78} C:{Var:C Labels:project_id=ecp-data-fqq58z Value:0xc02165fec0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982337145s EvaluationString:[ var='A' labels={project_id=ecp-data-fqq58z} value=1 ], [ var='B' labels={project_id=ecp-data-fqq58z} value=1 ], [ var='C' labels={project_id=ecp-data-fqq58z} value=0 ]} {Instance:project_id=ecp-data-fsqvpe State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fsqvpe Value:0xc02165ff10} B:{Var:B Labels:project_id=ecp-data-fsqvpe Value:0xc02165ff18} C:{Var:C Labels:project_id=ecp-data-fsqvpe Value:0xc02165ff60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98234385s EvaluationString:[ var='A' labels={project_id=ecp-data-fsqvpe} value=0 ], [ var='B' labels={project_id=ecp-data-fsqvpe} value=0 ], [ var='C' labels={project_id=ecp-data-fsqvpe} value=0 ]} {Instance:project_id=ecp-data-fuyfdn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fuyfdn Value:0xc013200040} B:{Var:B Labels:project_id=ecp-data-fuyfdn Value:0xc013200048} C:{Var:C Labels:project_id=ecp-data-fuyfdn Value:0xc013200090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982350945s EvaluationString:[ var='A' labels={project_id=ecp-data-fuyfdn} value=4 ], [ var='B' labels={project_id=ecp-data-fuyfdn} value=4 ], [ var='C' labels={project_id=ecp-data-fuyfdn} value=0 ]} {Instance:project_id=ecp-data-fvgjf9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fvgjf9 Value:0xc0132000f0} B:{Var:B Labels:project_id=ecp-data-fvgjf9 Value:0xc0132000f8} C:{Var:C Labels:project_id=ecp-data-fvgjf9 Value:0xc013200160}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982360566s EvaluationString:[ var='A' labels={project_id=ecp-data-fvgjf9} value=0 ], [ var='B' labels={project_id=ecp-data-fvgjf9} value=0 ], [ var='C' 
labels={project_id=ecp-data-fvgjf9} value=0 ]} {Instance:project_id=ecp-data-fwy7ad State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fwy7ad Value:0xc0132001e0} B:{Var:B Labels:project_id=ecp-data-fwy7ad Value:0xc0132001e8} C:{Var:C Labels:project_id=ecp-data-fwy7ad Value:0xc013200230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982367399s EvaluationString:[ var='A' labels={project_id=ecp-data-fwy7ad} value=0 ], [ var='B' labels={project_id=ecp-data-fwy7ad} value=0 ], [ var='C' labels={project_id=ecp-data-fwy7ad} value=0 ]} {Instance:project_id=ecp-data-fxgyrm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fxgyrm Value:0xc013200300} B:{Var:B Labels:project_id=ecp-data-fxgyrm Value:0xc013200290} C:{Var:C Labels:project_id=ecp-data-fxgyrm Value:0xc013200298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98237385s EvaluationString:[ var='A' labels={project_id=ecp-data-fxgyrm} value=2 ], [ var='B' labels={project_id=ecp-data-fxgyrm} value=2 ], [ var='C' labels={project_id=ecp-data-fxgyrm} value=0 ]} {Instance:project_id=ecp-data-fywasr State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-fywasr Value:0xc0132003d0} B:{Var:B Labels:project_id=ecp-data-fywasr Value:0xc013200370} C:{Var:C Labels:project_id=ecp-data-fywasr Value:0xc013200378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982379696s EvaluationString:[ var='A' labels={project_id=ecp-data-fywasr} value=3 ], [ var='B' labels={project_id=ecp-data-fywasr} value=3 ], [ var='C' labels={project_id=ecp-data-fywasr} value=0 ]} {Instance:project_id=ecp-data-g3rxv0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-g3rxv0 Value:0xc013200420} B:{Var:B Labels:project_id=ecp-data-g3rxv0 Value:0xc013200428} C:{Var:C Labels:project_id=ecp-data-g3rxv0 Value:0xc013200480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982386582s EvaluationString:[ var='A' labels={project_id=ecp-data-g3rxv0} value=0 ], [ var='B' labels={project_id=ecp-data-g3rxv0} value=0 ], [ var='C' labels={project_id=ecp-data-g3rxv0} value=0 ]} {Instance:project_id=ecp-data-g7aof2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-g7aof2 Value:0xc0132004f0} B:{Var:B Labels:project_id=ecp-data-g7aof2 Value:0xc0132004f8} C:{Var:C Labels:project_id=ecp-data-g7aof2 Value:0xc013200570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982393703s EvaluationString:[ var='A' labels={project_id=ecp-data-g7aof2} value=1 ], [ var='B' labels={project_id=ecp-data-g7aof2} value=1 ], [ var='C' labels={project_id=ecp-data-g7aof2} value=0 ]} {Instance:project_id=ecp-data-g83o4z State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-g83o4z Value:0xc0132005c0} B:{Var:B Labels:project_id=ecp-data-g83o4z Value:0xc0132005c8} C:{Var:C Labels:project_id=ecp-data-g83o4z Value:0xc013200620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98240043s EvaluationString:[ var='A' labels={project_id=ecp-data-g83o4z} value=1 ], [ var='B' labels={project_id=ecp-data-g83o4z} value=1 ], [ var='C' labels={project_id=ecp-data-g83o4z} value=0 ]} {Instance:project_id=ecp-data-g8a8nb State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-g8a8nb Value:0xc013200690} B:{Var:B Labels:project_id=ecp-data-g8a8nb Value:0xc013200698} C:{Var:C Labels:project_id=ecp-data-g8a8nb Value:0xc013200710}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:3.98240907s EvaluationString:[ var='A' labels={project_id=ecp-data-g8a8nb} value=0 ], [ var='B' labels={project_id=ecp-data-g8a8nb} value=0 ], [ var='C' labels={project_id=ecp-data-g8a8nb} value=0 ]} {Instance:project_id=ecp-data-g8cbxi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-g8cbxi Value:0xc0132007e0} B:{Var:B Labels:project_id=ecp-data-g8cbxi Value:0xc013200770} C:{Var:C Labels:project_id=ecp-data-g8cbxi Value:0xc013200778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982416732s EvaluationString:[ var='A' labels={project_id=ecp-data-g8cbxi} value=2 ], [ var='B' labels={project_id=ecp-data-g8cbxi} value=2 ], [ var='C' labels={project_id=ecp-data-g8cbxi} value=0 ]} {Instance:project_id=ecp-data-gag3ek State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gag3ek Value:0xc013200858} B:{Var:B Labels:project_id=ecp-data-gag3ek Value:0xc0132008c0} C:{Var:C Labels:project_id=ecp-data-gag3ek Value:0xc013200850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982447328s EvaluationString:[ var='A' labels={project_id=ecp-data-gag3ek} value=0 ], [ var='B' labels={project_id=ecp-data-gag3ek} value=0 ], [ var='C' labels={project_id=ecp-data-gag3ek} value=0 ]} {Instance:project_id=ecp-data-gd1bz8 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gd1bz8 Value:0xc013200910} B:{Var:B Labels:project_id=ecp-data-gd1bz8 Value:0xc013200918} C:{Var:C Labels:project_id=ecp-data-gd1bz8 Value:0xc013200970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982464445s EvaluationString:[ var='A' labels={project_id=ecp-data-gd1bz8} value=2971 ], [ var='B' labels={project_id=ecp-data-gd1bz8} value=2971 ], [ var='C' labels={project_id=ecp-data-gd1bz8} value=0 ]} {Instance:project_id=ecp-data-gdbg22 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gdbg22 Value:0xc013200a40} B:{Var:B Labels:project_id=ecp-data-gdbg22 Value:0xc0132009d0} C:{Var:C Labels:project_id=ecp-data-gdbg22 Value:0xc0132009d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982472469s EvaluationString:[ var='A' labels={project_id=ecp-data-gdbg22} value=0 ], [ var='B' labels={project_id=ecp-data-gdbg22} value=0 ], [ var='C' labels={project_id=ecp-data-gdbg22} value=0 ]} {Instance:project_id=ecp-data-gfm19b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gfm19b Value:0xc013200ab0} B:{Var:B Labels:project_id=ecp-data-gfm19b Value:0xc013200ab8} C:{Var:C Labels:project_id=ecp-data-gfm19b Value:0xc013200b10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982481772s EvaluationString:[ var='A' labels={project_id=ecp-data-gfm19b} value=2 ], [ var='B' labels={project_id=ecp-data-gfm19b} value=2 ], [ var='C' labels={project_id=ecp-data-gfm19b} value=0 ]} {Instance:project_id=ecp-data-gg82dh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gg82dh Value:0xc013200b70} B:{Var:B Labels:project_id=ecp-data-gg82dh Value:0xc013200b78} C:{Var:C Labels:project_id=ecp-data-gg82dh Value:0xc013200bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982489585s EvaluationString:[ var='A' labels={project_id=ecp-data-gg82dh} value=1 ], [ var='B' labels={project_id=ecp-data-gg82dh} value=1 ], [ var='C' labels={project_id=ecp-data-gg82dh} value=0 ]} {Instance:project_id=ecp-data-gi6wmb State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:project_id=ecp-data-gi6wmb Value:0xc013200c20} B:{Var:B Labels:project_id=ecp-data-gi6wmb Value:0xc013200c28} C:{Var:C Labels:project_id=ecp-data-gi6wmb Value:0xc013200c80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982496913s EvaluationString:[ var='A' labels={project_id=ecp-data-gi6wmb} value=3 ], [ var='B' labels={project_id=ecp-data-gi6wmb} value=3 ], [ var='C' labels={project_id=ecp-data-gi6wmb} value=0 ]} {Instance:project_id=ecp-data-gistqw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gistqw Value:0xc013200d60} B:{Var:B Labels:project_id=ecp-data-gistqw Value:0xc013200ce0} C:{Var:C Labels:project_id=ecp-data-gistqw Value:0xc013200ce8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982507025s EvaluationString:[ var='A' labels={project_id=ecp-data-gistqw} value=0 ], [ var='B' labels={project_id=ecp-data-gistqw} value=0 ], [ var='C' labels={project_id=ecp-data-gistqw} value=0 ]} {Instance:project_id=ecp-data-gizmwo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gizmwo Value:0xc013200db0} B:{Var:B Labels:project_id=ecp-data-gizmwo Value:0xc013200db8} C:{Var:C Labels:project_id=ecp-data-gizmwo Value:0xc013200e00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982513831s EvaluationString:[ var='A' labels={project_id=ecp-data-gizmwo} value=0 ], [ var='B' labels={project_id=ecp-data-gizmwo} value=0 ], [ var='C' labels={project_id=ecp-data-gizmwo} value=0 ]} {Instance:project_id=ecp-data-gjkt93 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gjkt93 Value:0xc013200e80} B:{Var:B Labels:project_id=ecp-data-gjkt93 Value:0xc013200e88} C:{Var:C Labels:project_id=ecp-data-gjkt93 Value:0xc013200ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982521409s EvaluationString:[ var='A' labels={project_id=ecp-data-gjkt93} value=0 ], [ var='B' labels={project_id=ecp-data-gjkt93} value=0 ], [ var='C' labels={project_id=ecp-data-gjkt93} value=0 ]} {Instance:project_id=ecp-data-gjunp8 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gjunp8 Value:0xc013200fa0} B:{Var:B Labels:project_id=ecp-data-gjunp8 Value:0xc013200f40} C:{Var:C Labels:project_id=ecp-data-gjunp8 Value:0xc013200f48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982528945s EvaluationString:[ var='A' labels={project_id=ecp-data-gjunp8} value=2 ], [ var='B' labels={project_id=ecp-data-gjunp8} value=2 ], [ var='C' labels={project_id=ecp-data-gjunp8} value=0 ]} {Instance:project_id=ecp-data-glrgfw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-glrgfw Value:0xc013201010} B:{Var:B Labels:project_id=ecp-data-glrgfw Value:0xc013201018} C:{Var:C Labels:project_id=ecp-data-glrgfw Value:0xc013201070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982536626s EvaluationString:[ var='A' labels={project_id=ecp-data-glrgfw} value=0 ], [ var='B' labels={project_id=ecp-data-glrgfw} value=0 ], [ var='C' labels={project_id=ecp-data-glrgfw} value=0 ]} {Instance:project_id=ecp-data-glsphd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-glsphd Value:0xc0132010c8} B:{Var:B Labels:project_id=ecp-data-glsphd Value:0xc013201150} C:{Var:C Labels:project_id=ecp-data-glsphd Value:0xc0132010c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982543813s EvaluationString:[ var='A' labels={project_id=ecp-data-glsphd} value=25 ], [ var='B' 
labels={project_id=ecp-data-glsphd} value=25 ], [ var='C' labels={project_id=ecp-data-glsphd} value=0 ]} {Instance:project_id=ecp-data-gn5zhp State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gn5zhp Value:0xc0132011c0} B:{Var:B Labels:project_id=ecp-data-gn5zhp Value:0xc0132011c8} C:{Var:C Labels:project_id=ecp-data-gn5zhp Value:0xc013201260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982552379s EvaluationString:[ var='A' labels={project_id=ecp-data-gn5zhp} value=1 ], [ var='B' labels={project_id=ecp-data-gn5zhp} value=1 ], [ var='C' labels={project_id=ecp-data-gn5zhp} value=0 ]} {Instance:project_id=ecp-data-go0gxf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-go0gxf Value:0xc0132012c0} B:{Var:B Labels:project_id=ecp-data-go0gxf Value:0xc0132012c8} C:{Var:C Labels:project_id=ecp-data-go0gxf Value:0xc013201320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982559298s EvaluationString:[ var='A' labels={project_id=ecp-data-go0gxf} value=6 ], [ var='B' labels={project_id=ecp-data-go0gxf} value=6 ], [ var='C' labels={project_id=ecp-data-go0gxf} value=0 ]} {Instance:project_id=ecp-data-goixky State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-goixky Value:0xc013201380} B:{Var:B Labels:project_id=ecp-data-goixky Value:0xc013201388} C:{Var:C Labels:project_id=ecp-data-goixky Value:0xc0132013e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982566833s EvaluationString:[ var='A' labels={project_id=ecp-data-goixky} value=0 ], [ var='B' labels={project_id=ecp-data-goixky} value=0 ], [ var='C' labels={project_id=ecp-data-goixky} value=0 ]} {Instance:project_id=ecp-data-gpgtv0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gpgtv0 Value:0xc013201460} B:{Var:B Labels:project_id=ecp-data-gpgtv0 Value:0xc013201468} C:{Var:C Labels:project_id=ecp-data-gpgtv0 Value:0xc0132014d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982574592s EvaluationString:[ var='A' labels={project_id=ecp-data-gpgtv0} value=0 ], [ var='B' labels={project_id=ecp-data-gpgtv0} value=0 ], [ var='C' labels={project_id=ecp-data-gpgtv0} value=0 ]} {Instance:project_id=ecp-data-gpu4uv State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gpu4uv Value:0xc013201590} B:{Var:B Labels:project_id=ecp-data-gpu4uv Value:0xc013201530} C:{Var:C Labels:project_id=ecp-data-gpu4uv Value:0xc013201538}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982582002s EvaluationString:[ var='A' labels={project_id=ecp-data-gpu4uv} value=1 ], [ var='B' labels={project_id=ecp-data-gpu4uv} value=1 ], [ var='C' labels={project_id=ecp-data-gpu4uv} value=0 ]} {Instance:project_id=ecp-data-gq9fmc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gq9fmc Value:0xc013201600} B:{Var:B Labels:project_id=ecp-data-gq9fmc Value:0xc013201608} C:{Var:C Labels:project_id=ecp-data-gq9fmc Value:0xc013201680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982589307s EvaluationString:[ var='A' labels={project_id=ecp-data-gq9fmc} value=12 ], [ var='B' labels={project_id=ecp-data-gq9fmc} value=12 ], [ var='C' labels={project_id=ecp-data-gq9fmc} value=0 ]} {Instance:project_id=ecp-data-gtockw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gtockw Value:0xc013201730} B:{Var:B Labels:project_id=ecp-data-gtockw Value:0xc0132016d0} C:{Var:C 
Labels:project_id=ecp-data-gtockw Value:0xc0132016d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982596907s EvaluationString:[ var='A' labels={project_id=ecp-data-gtockw} value=0 ], [ var='B' labels={project_id=ecp-data-gtockw} value=0 ], [ var='C' labels={project_id=ecp-data-gtockw} value=0 ]} {Instance:project_id=ecp-data-gu1emi State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gu1emi Value:0xc0132017a0} B:{Var:B Labels:project_id=ecp-data-gu1emi Value:0xc0132017a8} C:{Var:C Labels:project_id=ecp-data-gu1emi Value:0xc013201800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982604361s EvaluationString:[ var='A' labels={project_id=ecp-data-gu1emi} value=3 ], [ var='B' labels={project_id=ecp-data-gu1emi} value=3 ], [ var='C' labels={project_id=ecp-data-gu1emi} value=0 ]} {Instance:project_id=ecp-data-gumfnf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gumfnf Value:0xc0132018b0} B:{Var:B Labels:project_id=ecp-data-gumfnf Value:0xc013201850} C:{Var:C Labels:project_id=ecp-data-gumfnf Value:0xc013201858}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982612246s EvaluationString:[ var='A' labels={project_id=ecp-data-gumfnf} value=0 ], [ var='B' labels={project_id=ecp-data-gumfnf} value=0 ], [ var='C' labels={project_id=ecp-data-gumfnf} value=0 ]} {Instance:project_id=ecp-data-guqslt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-guqslt Value:0xc013201928} B:{Var:B Labels:project_id=ecp-data-guqslt Value:0xc013201980} C:{Var:C Labels:project_id=ecp-data-guqslt Value:0xc013201920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982619386s EvaluationString:[ var='A' labels={project_id=ecp-data-guqslt} value=1 ], [ var='B' labels={project_id=ecp-data-guqslt} value=1 ], [ var='C' labels={project_id=ecp-data-guqslt} value=0 ]} {Instance:project_id=ecp-data-gvfkii State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gvfkii Value:0xc0132019d0} B:{Var:B Labels:project_id=ecp-data-gvfkii Value:0xc0132019d8} C:{Var:C Labels:project_id=ecp-data-gvfkii Value:0xc013201a30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982628183s EvaluationString:[ var='A' labels={project_id=ecp-data-gvfkii} value=2 ], [ var='B' labels={project_id=ecp-data-gvfkii} value=2 ], [ var='C' labels={project_id=ecp-data-gvfkii} value=0 ]} {Instance:project_id=ecp-data-gwvmam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gwvmam Value:0xc013201a90} B:{Var:B Labels:project_id=ecp-data-gwvmam Value:0xc013201a98} C:{Var:C Labels:project_id=ecp-data-gwvmam Value:0xc013201af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982636452s EvaluationString:[ var='A' labels={project_id=ecp-data-gwvmam} value=0 ], [ var='B' labels={project_id=ecp-data-gwvmam} value=0 ], [ var='C' labels={project_id=ecp-data-gwvmam} value=0 ]} {Instance:project_id=ecp-data-gwzmam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gwzmam Value:0xc013201ba0} B:{Var:B Labels:project_id=ecp-data-gwzmam Value:0xc013201b40} C:{Var:C Labels:project_id=ecp-data-gwzmam Value:0xc013201b48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982645113s EvaluationString:[ var='A' labels={project_id=ecp-data-gwzmam} value=4 ], [ var='B' labels={project_id=ecp-data-gwzmam} value=4 ], [ var='C' labels={project_id=ecp-data-gwzmam} value=0 ]} 
{Instance:project_id=ecp-data-gxea4e State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gxea4e Value:0xc013201c08} B:{Var:B Labels:project_id=ecp-data-gxea4e Value:0xc013201c70} C:{Var:C Labels:project_id=ecp-data-gxea4e Value:0xc013201c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982654538s EvaluationString:[ var='A' labels={project_id=ecp-data-gxea4e} value=1 ], [ var='B' labels={project_id=ecp-data-gxea4e} value=1 ], [ var='C' labels={project_id=ecp-data-gxea4e} value=0 ]} {Instance:project_id=ecp-data-gxp7tu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gxp7tu Value:0xc013201d30} B:{Var:B Labels:project_id=ecp-data-gxp7tu Value:0xc013201cd0} C:{Var:C Labels:project_id=ecp-data-gxp7tu Value:0xc013201cd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982662282s EvaluationString:[ var='A' labels={project_id=ecp-data-gxp7tu} value=0 ], [ var='B' labels={project_id=ecp-data-gxp7tu} value=0 ], [ var='C' labels={project_id=ecp-data-gxp7tu} value=0 ]} {Instance:project_id=ecp-data-gxpz0v State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gxpz0v Value:0xc013201d98} B:{Var:B Labels:project_id=ecp-data-gxpz0v Value:0xc013201e00} C:{Var:C Labels:project_id=ecp-data-gxpz0v Value:0xc013201d90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982684543s EvaluationString:[ var='A' labels={project_id=ecp-data-gxpz0v} value=2 ], [ var='B' labels={project_id=ecp-data-gxpz0v} value=2 ], [ var='C' labels={project_id=ecp-data-gxpz0v} value=0 ]} {Instance:project_id=ecp-data-gzhu0a State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-gzhu0a Value:0xc013201ec0} B:{Var:B Labels:project_id=ecp-data-gzhu0a Value:0xc013201e60} C:{Var:C Labels:project_id=ecp-data-gzhu0a Value:0xc013201e68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982692248s EvaluationString:[ var='A' labels={project_id=ecp-data-gzhu0a} value=0 ], [ var='B' labels={project_id=ecp-data-gzhu0a} value=0 ], [ var='C' labels={project_id=ecp-data-gzhu0a} value=0 ]} {Instance:project_id=ecp-data-h04d5l State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h04d5l Value:0xc013201fa0} B:{Var:B Labels:project_id=ecp-data-h04d5l Value:0xc013201f30} C:{Var:C Labels:project_id=ecp-data-h04d5l Value:0xc013201f38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982701013s EvaluationString:[ var='A' labels={project_id=ecp-data-h04d5l} value=1 ], [ var='B' labels={project_id=ecp-data-h04d5l} value=1 ], [ var='C' labels={project_id=ecp-data-h04d5l} value=0 ]} {Instance:project_id=ecp-data-h0fp8g State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h0fp8g Value:0xc01ab12000} B:{Var:B Labels:project_id=ecp-data-h0fp8g Value:0xc01ab12008} C:{Var:C Labels:project_id=ecp-data-h0fp8g Value:0xc01ab12050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982708962s EvaluationString:[ var='A' labels={project_id=ecp-data-h0fp8g} value=28 ], [ var='B' labels={project_id=ecp-data-h0fp8g} value=28 ], [ var='C' labels={project_id=ecp-data-h0fp8g} value=0 ]} {Instance:project_id=ecp-data-h0jrqh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h0jrqh Value:0xc01ab121c8} B:{Var:B Labels:project_id=ecp-data-h0jrqh Value:0xc01ab12210} C:{Var:C Labels:project_id=ecp-data-h0jrqh Value:0xc01ab121c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982716034s 
EvaluationString:[ var='A' labels={project_id=ecp-data-h0jrqh} value=2 ], [ var='B' labels={project_id=ecp-data-h0jrqh} value=2 ], [ var='C' labels={project_id=ecp-data-h0jrqh} value=0 ]} {Instance:project_id=ecp-data-h5ywiq State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h5ywiq Value:0xc01ab12318} B:{Var:B Labels:project_id=ecp-data-h5ywiq Value:0xc01ab12360} C:{Var:C Labels:project_id=ecp-data-h5ywiq Value:0xc01ab12310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982723303s EvaluationString:[ var='A' labels={project_id=ecp-data-h5ywiq} value=3 ], [ var='B' labels={project_id=ecp-data-h5ywiq} value=3 ], [ var='C' labels={project_id=ecp-data-h5ywiq} value=0 ]} {Instance:project_id=ecp-data-h70hpp State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h70hpp Value:0xc01ab123b0} B:{Var:B Labels:project_id=ecp-data-h70hpp Value:0xc01ab123b8} C:{Var:C Labels:project_id=ecp-data-h70hpp Value:0xc01ab12400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982731066s EvaluationString:[ var='A' labels={project_id=ecp-data-h70hpp} value=0 ], [ var='B' labels={project_id=ecp-data-h70hpp} value=0 ], [ var='C' labels={project_id=ecp-data-h70hpp} value=0 ]} {Instance:project_id=ecp-data-h852tx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h852tx Value:0xc01ab12458} B:{Var:B Labels:project_id=ecp-data-h852tx Value:0xc01ab124a0} C:{Var:C Labels:project_id=ecp-data-h852tx Value:0xc01ab12450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98273885s EvaluationString:[ var='A' labels={project_id=ecp-data-h852tx} value=0 ], [ var='B' labels={project_id=ecp-data-h852tx} value=0 ], [ var='C' labels={project_id=ecp-data-h852tx} value=0 ]} {Instance:project_id=ecp-data-h8d042 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h8d042 Value:0xc01ab124f8} B:{Var:B Labels:project_id=ecp-data-h8d042 Value:0xc01ab125f0} C:{Var:C Labels:project_id=ecp-data-h8d042 Value:0xc01ab124f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982747042s EvaluationString:[ var='A' labels={project_id=ecp-data-h8d042} value=0 ], [ var='B' labels={project_id=ecp-data-h8d042} value=0 ], [ var='C' labels={project_id=ecp-data-h8d042} value=0 ]} {Instance:project_id=ecp-data-h983tm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-h983tm Value:0xc01ab12640} B:{Var:B Labels:project_id=ecp-data-h983tm Value:0xc01ab12648} C:{Var:C Labels:project_id=ecp-data-h983tm Value:0xc01ab12690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98275482s EvaluationString:[ var='A' labels={project_id=ecp-data-h983tm} value=8 ], [ var='B' labels={project_id=ecp-data-h983tm} value=8 ], [ var='C' labels={project_id=ecp-data-h983tm} value=0 ]} {Instance:project_id=ecp-data-hbjars State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-hbjars Value:0xc01ab12700} B:{Var:B Labels:project_id=ecp-data-hbjars Value:0xc01ab12708} C:{Var:C Labels:project_id=ecp-data-hbjars Value:0xc01ab12750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982762926s EvaluationString:[ var='A' labels={project_id=ecp-data-hbjars} value=0 ], [ var='B' labels={project_id=ecp-data-hbjars} value=0 ], [ var='C' labels={project_id=ecp-data-hbjars} value=0 ]} {Instance:project_id=ecp-data-hghi2c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-hghi2c Value:0xc01ab127a0} B:{Var:B 
[Raw Grafana alert rule evaluation output elided. Hundreds of per-project instance entries follow the same pattern, differing only in the project_id suffix, the in-memory pointer values, and the numeric results for variables A and B. Every instance reports State:Normal with matching A and B values and C=0, was evaluated at 2024-05-29 13:44:10 +0000 UTC, and has an EvaluationDuration of roughly 3.98s. A representative entry:]

{Instance:project_id=ecp-data-hhlnhx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-hhlnhx Value:0xc01ab12920} B:{Var:B Labels:project_id=ecp-data-hhlnhx Value:0xc01ab12928} C:{Var:C Labels:project_id=ecp-data-hhlnhx Value:0xc01ab12970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.9827947s EvaluationString:[ var='A' labels={project_id=ecp-data-hhlnhx} value=0 ], [ var='B' labels={project_id=ecp-data-hhlnhx} value=0 ], [ var='C' labels={project_id=ecp-data-hhlnhx} value=0 ]}

[... remaining {Instance:project_id=ecp-data-...} entries elided ...]
Labels:project_id=ecp-data-nuamf3 Value:0xc0256849e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.983987875s EvaluationString:[ var='A' labels={project_id=ecp-data-nuamf3} value=0 ], [ var='B' labels={project_id=ecp-data-nuamf3} value=0 ], [ var='C' labels={project_id=ecp-data-nuamf3} value=0 ]} {Instance:project_id=ecp-data-nwynf4 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-nwynf4 Value:0xc025684a30} B:{Var:B Labels:project_id=ecp-data-nwynf4 Value:0xc025684a38} C:{Var:C Labels:project_id=ecp-data-nwynf4 Value:0xc025684a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.983997156s EvaluationString:[ var='A' labels={project_id=ecp-data-nwynf4} value=0 ], [ var='B' labels={project_id=ecp-data-nwynf4} value=0 ], [ var='C' labels={project_id=ecp-data-nwynf4} value=0 ]} {Instance:project_id=ecp-data-nxtx7b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-nxtx7b Value:0xc025684ad0} B:{Var:B Labels:project_id=ecp-data-nxtx7b Value:0xc025684ad8} C:{Var:C Labels:project_id=ecp-data-nxtx7b Value:0xc025684b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98400438s EvaluationString:[ var='A' labels={project_id=ecp-data-nxtx7b} value=0 ], [ var='B' labels={project_id=ecp-data-nxtx7b} value=0 ], [ var='C' labels={project_id=ecp-data-nxtx7b} value=0 ]} {Instance:project_id=ecp-data-nxupzj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-nxupzj Value:0xc025684b70} B:{Var:B Labels:project_id=ecp-data-nxupzj Value:0xc025684b78} C:{Var:C Labels:project_id=ecp-data-nxupzj Value:0xc025684bc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984011721s EvaluationString:[ var='A' labels={project_id=ecp-data-nxupzj} value=0 ], [ var='B' labels={project_id=ecp-data-nxupzj} value=0 ], [ var='C' labels={project_id=ecp-data-nxupzj} value=0 ]} {Instance:project_id=ecp-data-o0w8zg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-o0w8zg Value:0xc025684c10} B:{Var:B Labels:project_id=ecp-data-o0w8zg Value:0xc025684c18} C:{Var:C Labels:project_id=ecp-data-o0w8zg Value:0xc025684c60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984019297s EvaluationString:[ var='A' labels={project_id=ecp-data-o0w8zg} value=0 ], [ var='B' labels={project_id=ecp-data-o0w8zg} value=0 ], [ var='C' labels={project_id=ecp-data-o0w8zg} value=0 ]} {Instance:project_id=ecp-data-o6v2et State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-o6v2et Value:0xc025684cb0} B:{Var:B Labels:project_id=ecp-data-o6v2et Value:0xc025684cb8} C:{Var:C Labels:project_id=ecp-data-o6v2et Value:0xc025684d00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984026242s EvaluationString:[ var='A' labels={project_id=ecp-data-o6v2et} value=0 ], [ var='B' labels={project_id=ecp-data-o6v2et} value=0 ], [ var='C' labels={project_id=ecp-data-o6v2et} value=0 ]} {Instance:project_id=ecp-data-o8j3ru State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-o8j3ru Value:0xc025684d58} B:{Var:B Labels:project_id=ecp-data-o8j3ru Value:0xc025684da0} C:{Var:C Labels:project_id=ecp-data-o8j3ru Value:0xc025684d50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984034423s EvaluationString:[ var='A' labels={project_id=ecp-data-o8j3ru} value=0 ], [ var='B' labels={project_id=ecp-data-o8j3ru} value=0 ], [ var='C' labels={project_id=ecp-data-o8j3ru} value=0 ]} 
{Instance:project_id=ecp-data-oanq7f State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-oanq7f Value:0xc025684e80} B:{Var:B Labels:project_id=ecp-data-oanq7f Value:0xc025684e30} C:{Var:C Labels:project_id=ecp-data-oanq7f Value:0xc025684e38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984041395s EvaluationString:[ var='A' labels={project_id=ecp-data-oanq7f} value=0 ], [ var='B' labels={project_id=ecp-data-oanq7f} value=0 ], [ var='C' labels={project_id=ecp-data-oanq7f} value=0 ]} {Instance:project_id=ecp-data-ocnmtf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ocnmtf Value:0xc025684ed0} B:{Var:B Labels:project_id=ecp-data-ocnmtf Value:0xc025684ed8} C:{Var:C Labels:project_id=ecp-data-ocnmtf Value:0xc025684f20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984050261s EvaluationString:[ var='A' labels={project_id=ecp-data-ocnmtf} value=6 ], [ var='B' labels={project_id=ecp-data-ocnmtf} value=6 ], [ var='C' labels={project_id=ecp-data-ocnmtf} value=0 ]} {Instance:project_id=ecp-data-ocwp0v State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ocwp0v Value:0xc025684fb8} B:{Var:B Labels:project_id=ecp-data-ocwp0v Value:0xc025685050} C:{Var:C Labels:project_id=ecp-data-ocwp0v Value:0xc025684fb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984058814s EvaluationString:[ var='A' labels={project_id=ecp-data-ocwp0v} value=3 ], [ var='B' labels={project_id=ecp-data-ocwp0v} value=3 ], [ var='C' labels={project_id=ecp-data-ocwp0v} value=0 ]} {Instance:project_id=ecp-data-oek932 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-oek932 Value:0xc0256850a0} B:{Var:B Labels:project_id=ecp-data-oek932 Value:0xc0256850a8} C:{Var:C Labels:project_id=ecp-data-oek932 Value:0xc0256850f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984066662s EvaluationString:[ var='A' labels={project_id=ecp-data-oek932} value=0 ], [ var='B' labels={project_id=ecp-data-oek932} value=0 ], [ var='C' labels={project_id=ecp-data-oek932} value=0 ]} {Instance:project_id=ecp-data-ofsywy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ofsywy Value:0xc0256851b0} B:{Var:B Labels:project_id=ecp-data-ofsywy Value:0xc025685140} C:{Var:C Labels:project_id=ecp-data-ofsywy Value:0xc025685148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984074416s EvaluationString:[ var='A' labels={project_id=ecp-data-ofsywy} value=2 ], [ var='B' labels={project_id=ecp-data-ofsywy} value=2 ], [ var='C' labels={project_id=ecp-data-ofsywy} value=0 ]} {Instance:project_id=ecp-data-ogjbm1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ogjbm1 Value:0xc025685290} B:{Var:B Labels:project_id=ecp-data-ogjbm1 Value:0xc025685240} C:{Var:C Labels:project_id=ecp-data-ogjbm1 Value:0xc025685248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984083009s EvaluationString:[ var='A' labels={project_id=ecp-data-ogjbm1} value=0 ], [ var='B' labels={project_id=ecp-data-ogjbm1} value=0 ], [ var='C' labels={project_id=ecp-data-ogjbm1} value=0 ]} {Instance:project_id=ecp-data-ogwoqa State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ogwoqa Value:0xc0256852e8} B:{Var:B Labels:project_id=ecp-data-ogwoqa Value:0xc025685330} C:{Var:C Labels:project_id=ecp-data-ogwoqa Value:0xc0256852e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984089791s 
EvaluationString:[ var='A' labels={project_id=ecp-data-ogwoqa} value=0 ], [ var='B' labels={project_id=ecp-data-ogwoqa} value=0 ], [ var='C' labels={project_id=ecp-data-ogwoqa} value=0 ]} {Instance:project_id=ecp-data-ohylf9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ohylf9 Value:0xc025685380} B:{Var:B Labels:project_id=ecp-data-ohylf9 Value:0xc025685388} C:{Var:C Labels:project_id=ecp-data-ohylf9 Value:0xc0256853d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984097343s EvaluationString:[ var='A' labels={project_id=ecp-data-ohylf9} value=0 ], [ var='B' labels={project_id=ecp-data-ohylf9} value=0 ], [ var='C' labels={project_id=ecp-data-ohylf9} value=0 ]} {Instance:project_id=ecp-data-ojput6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ojput6 Value:0xc025685428} B:{Var:B Labels:project_id=ecp-data-ojput6 Value:0xc025685470} C:{Var:C Labels:project_id=ecp-data-ojput6 Value:0xc025685420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984104131s EvaluationString:[ var='A' labels={project_id=ecp-data-ojput6} value=0 ], [ var='B' labels={project_id=ecp-data-ojput6} value=0 ], [ var='C' labels={project_id=ecp-data-ojput6} value=0 ]} {Instance:project_id=ecp-data-onfyts State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-onfyts Value:0xc0256854c0} B:{Var:B Labels:project_id=ecp-data-onfyts Value:0xc0256854c8} C:{Var:C Labels:project_id=ecp-data-onfyts Value:0xc025685510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984111318s EvaluationString:[ var='A' labels={project_id=ecp-data-onfyts} value=6 ], [ var='B' labels={project_id=ecp-data-onfyts} value=6 ], [ var='C' labels={project_id=ecp-data-onfyts} value=0 ]} {Instance:project_id=ecp-data-op6djz State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-op6djz Value:0xc0256855b0} B:{Var:B Labels:project_id=ecp-data-op6djz Value:0xc025685560} C:{Var:C Labels:project_id=ecp-data-op6djz Value:0xc025685568}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984117783s EvaluationString:[ var='A' labels={project_id=ecp-data-op6djz} value=0 ], [ var='B' labels={project_id=ecp-data-op6djz} value=0 ], [ var='C' labels={project_id=ecp-data-op6djz} value=0 ]} {Instance:project_id=ecp-data-opgcwd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-opgcwd Value:0xc025685600} B:{Var:B Labels:project_id=ecp-data-opgcwd Value:0xc025685608} C:{Var:C Labels:project_id=ecp-data-opgcwd Value:0xc025685650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984123793s EvaluationString:[ var='A' labels={project_id=ecp-data-opgcwd} value=0 ], [ var='B' labels={project_id=ecp-data-opgcwd} value=0 ], [ var='C' labels={project_id=ecp-data-opgcwd} value=0 ]} {Instance:project_id=ecp-data-opr910 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-opr910 Value:0xc0256856f0} B:{Var:B Labels:project_id=ecp-data-opr910 Value:0xc0256856a0} C:{Var:C Labels:project_id=ecp-data-opr910 Value:0xc0256856a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984131042s EvaluationString:[ var='A' labels={project_id=ecp-data-opr910} value=0 ], [ var='B' labels={project_id=ecp-data-opr910} value=0 ], [ var='C' labels={project_id=ecp-data-opr910} value=0 ]} {Instance:project_id=ecp-data-oqqd9c State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-oqqd9c Value:0xc025685740} B:{Var:B 
Labels:project_id=ecp-data-oqqd9c Value:0xc025685748} C:{Var:C Labels:project_id=ecp-data-oqqd9c Value:0xc025685790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984153655s EvaluationString:[ var='A' labels={project_id=ecp-data-oqqd9c} value=0 ], [ var='B' labels={project_id=ecp-data-oqqd9c} value=0 ], [ var='C' labels={project_id=ecp-data-oqqd9c} value=0 ]} {Instance:project_id=ecp-data-ot0g70 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ot0g70 Value:0xc025685830} B:{Var:B Labels:project_id=ecp-data-ot0g70 Value:0xc0256857e0} C:{Var:C Labels:project_id=ecp-data-ot0g70 Value:0xc0256857e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984161573s EvaluationString:[ var='A' labels={project_id=ecp-data-ot0g70} value=0 ], [ var='B' labels={project_id=ecp-data-ot0g70} value=0 ], [ var='C' labels={project_id=ecp-data-ot0g70} value=0 ]} {Instance:project_id=ecp-data-otiezg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-otiezg Value:0xc025685880} B:{Var:B Labels:project_id=ecp-data-otiezg Value:0xc025685888} C:{Var:C Labels:project_id=ecp-data-otiezg Value:0xc0256858e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984168978s EvaluationString:[ var='A' labels={project_id=ecp-data-otiezg} value=383 ], [ var='B' labels={project_id=ecp-data-otiezg} value=383 ], [ var='C' labels={project_id=ecp-data-otiezg} value=0 ]} {Instance:project_id=ecp-data-ouvktc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-ouvktc Value:0xc025685970} B:{Var:B Labels:project_id=ecp-data-ouvktc Value:0xc025685978} C:{Var:C Labels:project_id=ecp-data-ouvktc Value:0xc0256859c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.98417633s EvaluationString:[ var='A' labels={project_id=ecp-data-ouvktc} value=0 ], [ var='B' labels={project_id=ecp-data-ouvktc} value=0 ], [ var='C' labels={project_id=ecp-data-ouvktc} value=0 ]} {Instance:project_id=ecp-data-oxu1qw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-oxu1qw Value:0xc025685a10} B:{Var:B Labels:project_id=ecp-data-oxu1qw Value:0xc025685a18} C:{Var:C Labels:project_id=ecp-data-oxu1qw Value:0xc025685a60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984184295s EvaluationString:[ var='A' labels={project_id=ecp-data-oxu1qw} value=0 ], [ var='B' labels={project_id=ecp-data-oxu1qw} value=0 ], [ var='C' labels={project_id=ecp-data-oxu1qw} value=0 ]} {Instance:project_id=ecp-data-p8yetz State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-p8yetz Value:0xc025685af0} B:{Var:B Labels:project_id=ecp-data-p8yetz Value:0xc025685af8} C:{Var:C Labels:project_id=ecp-data-p8yetz Value:0xc025685b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984191867s EvaluationString:[ var='A' labels={project_id=ecp-data-p8yetz} value=0 ], [ var='B' labels={project_id=ecp-data-p8yetz} value=0 ], [ var='C' labels={project_id=ecp-data-p8yetz} value=0 ]} {Instance:project_id=ecp-data-paulve State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-paulve Value:0xc025685be0} B:{Var:B Labels:project_id=ecp-data-paulve Value:0xc025685be8} C:{Var:C Labels:project_id=ecp-data-paulve Value:0xc025685c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984220923s EvaluationString:[ var='A' labels={project_id=ecp-data-paulve} value=3 ], [ var='B' labels={project_id=ecp-data-paulve} value=3 ], [ var='C' 
labels={project_id=ecp-data-paulve} value=0 ]} {Instance:project_id=ecp-data-pcxhi3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-pcxhi3 Value:0xc025685c80} B:{Var:B Labels:project_id=ecp-data-pcxhi3 Value:0xc025685c88} C:{Var:C Labels:project_id=ecp-data-pcxhi3 Value:0xc025685cd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984228844s EvaluationString:[ var='A' labels={project_id=ecp-data-pcxhi3} value=4 ], [ var='B' labels={project_id=ecp-data-pcxhi3} value=4 ], [ var='C' labels={project_id=ecp-data-pcxhi3} value=0 ]} {Instance:project_id=ecp-data-pd5jrt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-pd5jrt Value:0xc025685d20} B:{Var:B Labels:project_id=ecp-data-pd5jrt Value:0xc025685d28} C:{Var:C Labels:project_id=ecp-data-pd5jrt Value:0xc025685d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984238985s EvaluationString:[ var='A' labels={project_id=ecp-data-pd5jrt} value=0 ], [ var='B' labels={project_id=ecp-data-pd5jrt} value=0 ], [ var='C' labels={project_id=ecp-data-pd5jrt} value=0 ]} {Instance:project_id=ecp-data-pdfxc7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-pdfxc7 Value:0xc025685e10} B:{Var:B Labels:project_id=ecp-data-pdfxc7 Value:0xc025685dc0} C:{Var:C Labels:project_id=ecp-data-pdfxc7 Value:0xc025685dc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984246012s EvaluationString:[ var='A' labels={project_id=ecp-data-pdfxc7} value=3 ], [ var='B' labels={project_id=ecp-data-pdfxc7} value=3 ], [ var='C' labels={project_id=ecp-data-pdfxc7} value=0 ]} {Instance:project_id=ecp-data-pdhvvg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-pdhvvg Value:0xc025685e60} B:{Var:B Labels:project_id=ecp-data-pdhvvg Value:0xc025685e68} C:{Var:C Labels:project_id=ecp-data-pdhvvg Value:0xc025685eb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984253349s EvaluationString:[ var='A' labels={project_id=ecp-data-pdhvvg} value=0 ], [ var='B' labels={project_id=ecp-data-pdhvvg} value=0 ], [ var='C' labels={project_id=ecp-data-pdhvvg} value=0 ]} {Instance:project_id=ecp-data-pghiue State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:project_id=ecp-data-pghiue Value:0xc025685f50} B:{Var:B Labels:project_id=ecp-data-pghiue Value:0xc025685f00} C:{Var:C Labels:project_id=ecp-data-pghiue Value:0xc025685f08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.984262543s EvaluationString:[ var='A' labels={project_id=ecp-data-pghiue} value=0 ], [ var='B' labels={project_id=ecp-data-pghiue} valu
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbyseail-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.009268867Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.009156206Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug ts=2024-05-29T13:44:14.009110677Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.009114279Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=127717 slug=engenoil t=2024-05-29T13:44:14.008860263Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.879105ms
+level=debug ts=2024-05-29T13:44:14.008798393Z caller=ruler.go:522 msg="tenant is owned by this instance" user=548153 slug=koalastreams groups=0
+level=debug ts=2024-05-29T13:44:14.008592734Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbnlmnc6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.008408678Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.008350415Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:14.008306988Z caller=client.go:80 msg="creating client for grafana instance" user=505053 addr=dns:///luydev-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:14.008216988Z caller=ruler.go:522 msg="tenant is owned by this instance" user=528519 slug=lagg groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pblopq0g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.008168355Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbkant6r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.008080844Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.008051374Z caller=remote_image_capturer.go:54 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbkant6r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.008022064Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbkant6r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.007950903Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.007878284Z caller=ruler.go:522 msg="tenant is owned by this instance" user=747443 slug=kabema groups=0
+level=debug ts=2024-05-29T13:44:14.00783894Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:14.007837884Z caller=client.go:80 msg="creating client for grafana instance" user=703790 addr=dns:///luisbonet-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:14.007815457Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:14.007737783Z caller=client.go:80 msg="creating client for grafana instance" user=686926 addr=dns:///luciabotoaca-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:14.007750224Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.007796487Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.00769328Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.00766088Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbgtxr3h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00767758Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.007608412Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbgtxr3h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00760016Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=395357 slug=sensen instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.007571493Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=debug component=discovery ts=2024-05-29T13:44:14.007506123Z caller=retry.go:58 user=480895 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=2
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbgtxr3h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.007463398Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbc7j5e8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.007423338Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.007453581Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.007322099Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.007309177Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.007277569Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+level=warn ts=2024-05-29T13:44:14.007192078Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=760092 slug=kubazamek
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pbc7j5e8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.007288186Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=482907 slug=wavelonp t=2024-05-29T13:44:14.007168176Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=482907 slug=wavelonp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.007152736Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=482907 slug=wavelonp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:14.007141577Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pb956owu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.007065984Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.007130345Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=482907 slug=wavelonp t=2024-05-29T13:44:14.007095687Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pb8t330q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.007023474Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.006986576Z caller=ruler.go:522 msg="tenant is owned by this instance" user=626343 slug=lakeyaung groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pb8t330q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006969373Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pb8t330q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006927383Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pb8t330q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006915472Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-payuz8i2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006743951Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-payuz8i2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00667778Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-payuz8i2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00663698Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.006557446Z caller=remote_instance_store.go:51 user=637816 slug=kingobservatory msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-papfzr3g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006548889Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-papfzr3g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006526198Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pak5iv05-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006450908Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pak5iv05-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006425897Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.006385151Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:14.00636292Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pak5iv05-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006384827Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nfs-2022-common-bitc.coreSlave*.usersessions.status.nfs-2022-ps5-bitc.GaugeUS_aws-iad_Slave,5)) Query" t=2024-05-29T13:44:14.006147157Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pabjx2ez-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006182815Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.006142493Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pabjx2ez-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006129754Z level=debug msg="Setting next state" handler=resultNormal
+level=warn ts=2024-05-29T13:44:14.005994167Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=937412 slug=kxcprod
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pa57clrz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.006015953Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pa57clrz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005983633Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-pa57clrz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005956953Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9y5mor1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005918042Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9y5mor1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005855752Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9y5mor1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005781381Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9xqi7g6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00575055Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9xqi7g6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00569453Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9xqi7g6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005636689Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9xqi7g6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005604869Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.005608669Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.005573759Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+level=info component=discovery ts=2024-05-29T13:44:14.005469462Z caller=client.go:80 msg="creating client for grafana instance" user=760916 addr=dns:///lomasmhc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9uf4khp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005493148Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.005377861Z caller=ruler.go:522 msg="tenant is owned by this instance" user=493401 slug=kriptobot groups=0
+level=debug ts=2024-05-29T13:44:14.005152359Z caller=ruler.go:522 msg="tenant is owned by this instance" user=485198 slug=ixopay groups=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9rdj2je-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.005237785Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.005082858Z caller=ruler.go:522 msg="tenant is owned by this instance" user=617570 slug=khlebnikov groups=0
+level=debug ts=2024-05-29T13:44:14.004811407Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9r6crm3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.004989223Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9r6crm3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.004922482Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9r6crm3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.004891442Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9r6crm3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00474226Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.004708147Z caller=remote_instance_store.go:51 user=489921 slug=statuscake msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9r6crm3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00471217Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:14.004721676Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9qq23wi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.004678549Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.004649424Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9qq23wi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.004493198Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=763376 slug=f5nginxone t=2024-05-29T13:44:14.004311746Z level=debug msg="Saving alert states" count=13 max_state_save_concurrency=1
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.004253805Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.004245175Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.004136768Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.004189004Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.004132324Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:14.004068608Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.004069401Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:14.004037295Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.004055488Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.004017033Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.004025925Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.003996622Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.003965932Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.003928371Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.003944682Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.003937472Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.003928412Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=763376 slug=f5nginxone instance="datasource_uid=grafanacloud-logs, ref_id=NumErrors" t=2024-05-29T13:44:14.003902741Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=info component=discovery ts=2024-05-29T13:44:14.003862346Z caller=client.go:80 msg="creating client for grafana instance" user=556868 addr=dns:///levvr-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:14.003780429Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.003778519Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.003724036Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9pk58dg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.003664719Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.003622364Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9pk58dg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.003650289Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9pk58dg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.003582828Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.003563506Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.003544959Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" + level=debug ts=2024-05-29T13:44:14.00335216Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9oyav1a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.003374096Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-se-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-se-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.003436795Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9oyav1a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.003297565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=763376 slug=f5nginxone version=150 fingerprint=cbde15b7286806cb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.003248395Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=NumErrors State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.002857244s EvaluationString:}]" duration=14.177186ms + logger=ngalert.state.manager user=27998 slug=korob instance="datasource_uid=grafanacloud-korob, ref_id=A" t=2024-05-29T13:44:14.003239135Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=27998 slug=korob version=1 fingerprint=2b0dd66bcecb5405 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:14.003144131Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-korob, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:4.002816498s EvaluationString:}]" duration=56.873168ms + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.002970822Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" + level=info component=discovery ts=2024-05-29T13:44:14.002804636Z caller=client.go:80 msg="creating client for grafana instance" user=528965 addr=dns:///letsatsi-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9eexzn9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.00273242Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:14.002691035Z caller=client.go:80 msg="creating client for grafana instance" user=659312 addr=dns:///leonetwork-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:14.002673635Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=520659 slug=jleahy + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p9d157th-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.002593638Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-356061laioeastus, cloud_platform=AZURE, customer_id=A007, env_id=env-356061, env_name=holuo-azure, env_type=qa, instance=env-356061laioeastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.002554757Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.002388433Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=536064 slug=jcomp + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-356061laioeastus, cloud_platform=AZURE, customer_id=A007, env_id=env-356061, env_name=holuo-azure, env_type=qa, instance=env-356061laioeastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:14.002460901Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.002352832Z caller=ruler.go:522 msg="tenant is owned by this instance" user=536064 slug=jcomp groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p930tzlg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.002265575Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.002151445Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.00215223Z caller=ruler.go:522 msg="tenant is owned by this instance" user=811513 slug=inpowertech groups=5 + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-nl-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.002295066Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.002265666Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core" + level=debug ts=2024-05-29T13:44:14.002151154Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p930tzlg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.002212974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p91ql4x6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.002078763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p91ql4x6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.002053723Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p91ql4x6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.002010422Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:14.001882228Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=685307 slug=kdandersen + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-ie-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, 
name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.001982254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-ie-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.001932173Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.001714631Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8s8236j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.001666449Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.001484844Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.001428127Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-338261laio1usw2, cloud_platform=AWS, customer_id=C820, env_id=338261, env_name=C820 RH Prod, env_type=prod, instance=env-338261laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.001489954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8plf8xi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.001416646Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-338261laio1usw2, cloud_platform=AWS, customer_id=C820, env_id=338261, env_name=C820 RH Prod, env_type=prod, instance=env-338261laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:14.001408553Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8lj523r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.001246244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-es-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.001316542Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.001270764Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-dk-se-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-dk-se-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.001184563Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.001138585Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-p8lj523r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.001180004Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-dk-se-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-dk-se-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.001132774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698963 slug=lemonade instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:14.001096577Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.001097687Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8lj523r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.001111983Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8k50liz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.001024342Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.00106492Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=520568 slug=karpinity + level=debug ts=2024-05-29T13:44:14.00100458Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-de-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-de-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.000999667Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.000828421Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.000847914Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:14.000826601Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-ch-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-ch-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.000816256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-ch-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-ch-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.000797888Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8hopxm9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.000770799Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8hopxm9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.000743059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8hopxm9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.000671418Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:14.000599084Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:14.000639628Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=245291 slug=pismo t=2024-05-29T13:44:14.000576082Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-be-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-be-oms-dead-letter-queue, 
pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.000647533Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:14.000562863Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=45.927064ms + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-au-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-au-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.000506098Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.000253512Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=686478 slug=jitech + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8gcybn5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.000483216Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8gcybn5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.000468026Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.000415714Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=644298 slug=jrhrmsll + level=debug ts=2024-05-29T13:44:14.00033346Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-at-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-at-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:14.000371303Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-338097laio1use1, cloud_platform=AWS, customer_id=C591, env_id=338097, env_name=C591_Parallel_Dev, env_type=dev, instance=env-338097laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:14.000299698Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:14.000227096Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8e19b0t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.000182763Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8e19b0t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:14.000112523Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:14.000131411Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=676704 slug=jonasbroenstrup + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:14.000133787Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:14.000114146Z level=debug msg="Saving alert states" count=20 max_state_save_concurrency=1 + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradingIds, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.000077065Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8d3ebus-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999998601Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8d3ebus-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999970601Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradeReportRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:14.000016123Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth 
instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SeedingStatusRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999938147Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8d3ebus-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999905431Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SchedulerRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999892655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8d3ebus-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99987697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8d3ebus-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99985063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.99981275Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8d2foku-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99982073Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p8d2foku-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99980989Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:13.999747108Z caller=ruler.go:522 msg="tenant is owned by this instance" user=749834 slug=kasperwem groups=0 + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueuedEvents, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999757311Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.99975864Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueueSummaries, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999726594Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999626357Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.999407404Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=739417 slug=ivigee + level=debug ts=2024-05-29T13:44:13.99940356Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_PeerRecord, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999445375Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_PeerRecord, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999431267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7rfxkan-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999346525Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, 
name=NodeContext_LockRequest, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999320562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7rfxkan-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999291834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=206107 slug=hydrolix version=7 fingerprint=7d725d500776389b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.999197263Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=prometheus_ready, instance=localhost:9090, job=prometheus State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=prometheus_ready, instance=localhost:9090, job=prometheus Value:0xc018f870f0} B:{Var:B Labels:__name__=prometheus_ready, instance=localhost:9090, job=prometheus Value:0xc018f87140} C:{Var:C Labels:__name__=prometheus_ready, instance=localhost:9090, job=prometheus Value:0xc018f87190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.998886825s EvaluationString:[ var='A' labels={__name__=prometheus_ready, instance=localhost:9090, job=prometheus} value=1 ], [ var='B' labels={__name__=prometheus_ready, instance=localhost:9090, job=prometheus} value=1 ], [ var='C' labels={__name__=prometheus_ready, instance=localhost:9090, job=prometheus} value=0 ]}]" duration=241.42344ms + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_Lock, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999264576Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7rfxkan-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999259534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7r15ga8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999207353Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.999161005Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.999176202Z caller=client.go:80 msg="creating client for grafana instance" user=937412 addr=dns:///kxcprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms 
retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.999151002Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=560006 slug=jfrd + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_HostProperties, scope=platform, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.999192002Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-payment-mediator-dead-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:13.999136465Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7r15ga8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.999114032Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.99912138Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.999048689Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.999084601Z caller=client.go:80 msg="creating client for grafana instance" user=623034 addr=dns:///kvmonitoring-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.999061201Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=531108 slug=juliancloud + level=debug ts=2024-05-29T13:44:13.999106894Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=app_channel, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.998966666Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.999006174Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=app_channel, url=http://127.0.0.1:8301/metrics" t=2024-05-29T13:44:13.998950667Z 
level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.999028905Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.998902374Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7pe5v4g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998930291Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=538037 slug=drivewealth version=7 fingerprint=5c7d4cde85049854 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.998323163Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=app_channel, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=app_channel, url=http://127.0.0.1:8301/metrics Value:0xc0336034f0} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=app_channel, url=http://127.0.0.1:8301/metrics Value:0xc033603600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.99652638s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=app_channel, url=http://127.0.0.1:8301/metrics} value=1 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=app_channel, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=control_channel, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=control_channel, url=http://127.0.0.1:8301/metrics Value:0xc033603848} C:{Var:C 
Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=control_channel, url=http://127.0.0.1:8301/metrics Value:0xc033603a38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996553446s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=control_channel, url=http://127.0.0.1:8301/metrics} value=4 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=ChannelState_ActiveStreamRecord, scope=control_channel, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_DirectoryQueryRecord, scope=platform, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_DirectoryQueryRecord, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc033603d88} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_DirectoryQueryRecord, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc033603ec0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996573871s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_DirectoryQueryRecord, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_DirectoryQueryRecord, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_HostProperties, scope=platform, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_HostProperties, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb8190} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, 
application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_HostProperties, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb8318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.99658578s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_HostProperties, scope=platform, url=http://127.0.0.1:8301/metrics} value=1 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_HostProperties, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_Lock, scope=platform, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_Lock, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb8870} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_Lock, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb89a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996596696s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_Lock, scope=platform, url=http://127.0.0.1:8301/metrics} value=4 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_Lock, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockRequest, scope=platform, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockRequest, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb8cb0} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockRequest, 
scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb8df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996608522s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockRequest, scope=platform, url=http://127.0.0.1:8301/metrics} value=4 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockRequest, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockTimer, scope=platform, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockTimer, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb9868} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockTimer, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb9990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996620575s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockTimer, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_LockTimer, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_PeerRecord, scope=platform, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_PeerRecord, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc02f7e0000} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_PeerRecord, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc01bbb9d98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996630846s EvaluationString:[ var='B' 
labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_PeerRecord, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_PeerRecord, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_RegistrationRecord, scope=platform, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_RegistrationRecord, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc02f7e02a0} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_RegistrationRecord, scope=platform, url=http://127.0.0.1:8301/metrics Value:0xc02f7e0448}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996643372s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_RegistrationRecord, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=NodeContext_RegistrationRecord, scope=platform, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_CollectionRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_CollectionRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e0700} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_CollectionRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e07f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996656804s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, 
component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_CollectionRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_CollectionRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_FixConnectionRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_FixConnectionRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e0998} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_FixConnectionRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e0aa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996667632s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_FixConnectionRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=7 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_FixConnectionRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e0e50} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996678639s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, 
name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=32 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_KafkaOffsetRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_NodeRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_NodeRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1298} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_NodeRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996692123s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_NodeRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_NodeRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueueSummaries, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueueSummaries, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1550} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueueSummaries, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996700607s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueueSummaries, scope=app, url=http://127.0.0.1:8301/metrics} value=2 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, 
application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueueSummaries, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueuedEvents, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueuedEvents, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e18e0} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueuedEvents, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e17f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996706885s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueuedEvents, scope=app, url=http://127.0.0.1:8301/metrics} value=317606 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_QueuedEvents, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SchedulerRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SchedulerRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1b80} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SchedulerRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1a98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996716602s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SchedulerRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, 
name=TrfEngineState_SchedulerRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SeedingStatusRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SeedingStatusRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1d18} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SeedingStatusRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1de8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996728561s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SeedingStatusRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=1 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_SeedingStatusRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TimerEntryRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TimerEntryRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc02f7e1fe0} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TimerEntryRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc01b526390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.99674025s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TimerEntryRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=1 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TimerEntryRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} 
{Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradeReportRecord, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradeReportRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc01b526ec0} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradeReportRecord, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc01b527360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.996750501s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradeReportRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=3.693234e+06 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradeReportRecord, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]} {Instance:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradingIds, scope=app, url=http://127.0.0.1:8301/metrics State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradingIds, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc01b527e28} C:{Var:C Labels:__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradingIds, scope=app, url=http://127.0.0.1:8301/metrics Value:0xc035d24e00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.99676234s EvaluationString:[ var='B' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradingIds, scope=app, url=http://127.0.0.1:8301/metrics} value=1 ], [ var='C' labels={__name__=trf1p_metrics_hydra_repository_row_count, application=trf1p_metrics, business=institutional, component=TrfEngine, data_type=application, db=telegraf, host=ny4ap-intel-02, location=NY4-PQT-8162, name=TrfEngineState_TradingIds, scope=app, url=http://127.0.0.1:8301/metrics} value=0 ]}]" duration=186.747531ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7pe5v4g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99889847Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7n2ef7h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998781039Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7n2ef7h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998721068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7n2ef7h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998664368Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7n2ef7h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998611097Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7n2ef7h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998577187Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.998619397Z caller=client.go:80 msg="creating client for grafana instance" user=619427 addr=dns:///kvist-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.998573323Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-p7j2e9vz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998538447Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.998374745Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7j2e9vz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998437615Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.998450995Z caller=ruler.go:522 msg="tenant is owned by this instance" user=664622 slug=investsuitetest groups=2 + level=debug ts=2024-05-29T13:44:13.99843266Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-337536laio1use1, cloud_platform=AWS, customer_id=C706, env_id=337536, env_name=C706 COX DEV Parallel, env_type=dev, instance=env-337536laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.998310072Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.998211334Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p7grhhbe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998220153Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.998186602Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.998159833Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.998099806Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p714od85-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998086732Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-337533laio1southeastasia, cloud_platform=Azure, customer_id=A228, env_id=337533, env_name=A228 DFS CompanyVital-Dev, env_type=dev, instance=env-337533laio1southeastasia, 
job=integrations/node_exporter, region=southeastasia, stage=testing" t=2024-05-29T13:44:13.998079451Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.998031969Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p714od85-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.998034241Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-337500laio1euw2, cloud_platform=AWS, customer_id=C817, env_id=337500, env_name=C817_NHS_UAT, env_type=qa, instance=env-337500laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=preprod" t=2024-05-29T13:44:13.997669452Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6wf6azs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99790158Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core" t=2024-05-29T13:44:13.997775045Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6wf6azs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997824799Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6ttxtbo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997782919Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.997691927Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6ttxtbo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997668408Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6qp5zqf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997638587Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:13.99756187Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6jn0els-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997484766Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6jn0els-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997435285Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.997352118Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6hoghn3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997235433Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6g2t3zo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997202833Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6g2t3zo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997180693Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-337490laio1euw2, cloud_platform=AWS, customer_id=C817, env_id=337490, env_name=C817_NHS_DEV, env_type=dev, instance=env-337490laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=preprod" t=2024-05-29T13:44:13.997121008Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6g2t3zo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997152152Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6chrs01-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.997005731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p6chrs01-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996978281Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.996915292Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p68zwxtg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99688634Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p68zwxtg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996834819Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p68zwxtg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996776328Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.996741179Z caller=client.go:80 msg="creating client for grafana instance" user=666056 addr=dns:///kreicer-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p68zwxtg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996744248Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.996679479Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=677303 slug=ituoga + level=debug ts=2024-05-29T13:44:13.996556629Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.996493577Z caller=ruler.go:522 msg="tenant is owned by this instance" user=739422 slug=iverdi groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p68t09jv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996576176Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p63rncs4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996525816Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p63rncs4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996420385Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p62bagzh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996379334Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.996359908Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p62bagzh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996286093Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p62bagzh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996255943Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.996209809Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core" t=2024-05-29T13:44:13.99624911Z level=warn msg="Failed to take an image" dashboard=CNR8LzU7z2323213wrrwewr panel=16 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5zwf6gg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996201553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5zwf6gg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996141102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-p5zwf6gg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996111122Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.996159574Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=799801 slug=ivercloud + level=warn ts=2024-05-29T13:44:13.996072573Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=544014 slug=ismartsolutions2022 + level=debug ts=2024-05-29T13:44:13.996001272Z caller=ruler.go:522 msg="tenant is owned by this instance" user=544014 slug=ismartsolutions2022 groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5zwf6gg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996069591Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5zwf6gg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.996036821Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.995756207Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-337310laio1eastus, cloud_platform=Azure, customer_id=A253, env_id=337310, env_name=A253_Gilbane_Dev, env_type=dev, instance=env-337310laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.995773931Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.995702105Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.995628697Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.995604968Z caller=client.go:80 msg="creating client for grafana instance" user=637163 addr=dns:///kingmakersdevtest-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.995582368Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=615517 slug=ivc + logger=ngalert.state.manager user=409840 slug=smpl instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.995209511Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=409840 slug=smpl instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.995160784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=409840 slug=smpl instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.995120949Z level=debug msg="Setting 
next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5hcmqu7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.995187052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=273545 slug=strigoio t=2024-05-29T13:44:13.995035482Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-337309laio1eastus, cloud_platform=Azure, customer_id=A253, env_id=337309, env_name=A253_Gilbane_Prod, env_type=prod, instance=env-337309laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.995108796Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.99500305Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5c8b0he-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.9950056Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-e20810b8085e4866, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:13.995021443Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.994950871Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=127717 slug=engenoil instance= t=2024-05-29T13:44:13.994966863Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5c8b0he-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99494725Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p5c8b0he-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994742518Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.994865961Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=557705 slug=inigoalbizu + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-0d63a10e6fc44824, 
persistentvolumeclaim=main-main-jn7b-pgdata" t=2024-05-29T13:44:13.994805422Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-05774361c90d426d, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:13.99477927Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.99469836Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=690119 slug=isolvtech + logger=ngalert.state.manager.persist user=460990 slug=classting t=2024-05-29T13:44:13.994801894Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p58myi6l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994690447Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=460990 slug=classting version=54 fingerprint=1a771ade3f48c1d3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.994666843Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc02ca1b920} C:{Var:C Labels: Value:0xc02ca1b928}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.994407285s EvaluationString:[ var='B' labels={} value=44.078872845505714 ], [ var='C' labels={} value=0 ]}]" duration=194.07618ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p58myi6l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994661357Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.994540258Z caller=client.go:80 msg="creating client for grafana instance" user=659210 addr=dns:///kedacore-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p58myi6l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994587266Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4zer333-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994500615Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4zer333-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994479655Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.994402384Z caller=remote_image_capturer.go:54 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="rendering alert image with grafana" + level=debug ts=2024-05-29T13:44:13.994338656Z caller=ruler.go:522 msg="tenant is owned by this instance" user=757626 slug=hmgroup groups=12 + level=info component=discovery ts=2024-05-29T13:44:13.994323156Z caller=client.go:80 msg="creating client for grafana instance" user=685307 addr=dns:///kdandersen-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.994288856Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=634981 slug=itprocurement1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4mzvauf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994323243Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core" t=2024-05-29T13:44:13.994229752Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:13.994178757Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4c5fhxt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994127221Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4bsfpd2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.994067691Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4bsfpd2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99403741Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.993970152Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.99382865Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4ah42g1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993932039Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4ah42g1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993858848Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.993898815Z caller=remote_alert_sender.go:94 user=698103 slug=vericast host=vericast-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.104.126:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdfnr97u4dd6rf alerts=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-336215laio1use1, cloud_platform=AWS, customer_id=C694, env_id=336215, env_name=C694_COX_DEV, env_type=dev, instance=env-336215laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" 
t=2024-05-29T13:44:13.993857439Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4ah42g1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993828878Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.993850685Z caller=remote_alert_sender.go:94 user=698103 slug=vericast host=vericast-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.245.254:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdfnr97u4dd6rf alerts=1 + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:13.993848509Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p47lz3k4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993755527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:13.993757473Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.535397ms + logger=ngalert.scheduler user=109452 slug=deltarisk version=7 fingerprint=76eeaa3b01b52982 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.993661634Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.993353613s EvaluationString:}]" duration=24.465172ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p47lz3k4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993672906Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.993607671Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-336213laio1use1, cloud_platform=AWS, customer_id=C694, env_id=336213, env_name=C694_PARALLEL_PROD, env_type=prod, instance=env-336213laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.993660384Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.993590077Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.99347787Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p4607i59-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993537875Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.993503718Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.993410369Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.993385474Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.993327847Z caller=client.go:80 msg="creating client for grafana instance" user=749834 addr=dns:///kasperwem-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.993269346Z caller=ruler.go:522 msg="tenant is owned by this instance" user=630017 slug=its4group groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p453ucqm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993334083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-336101laio1canadacentral, cloud_platform=Azure, customer_id=A215, env_id=336101, env_name=A215 FCL Prod, env_type=prod, instance=env-336101laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=preprod" t=2024-05-29T13:44:13.99323196Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p423gh57-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993202172Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.993142242Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p423gh57-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.993113821Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-336066laio1use1, cloud_platform=AWS, 
customer_id=C593, env_id=336066, env_name=C593_TASC_PROD, env_type=prod, instance=env-336066laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.993079615Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core" t=2024-05-29T13:44:13.992978949Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-336066laio1use1, cloud_platform=AWS, customer_id=C593, env_id=336066, env_name=C593_TASC_PROD, env_type=prod, instance=env-336066laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.993063296Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p3wbipna-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992923669Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p3om7p1h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992838848Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p3l7tu07-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992744077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p3l7tu07-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992691896Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:13.992902525Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:13.992888496Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-336008laio1use1, cloud_platform=AWS, customer_id=C766, env_id=336008, env_name=C766 PURE DEV, env_type=dev, instance=env-336008laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.992878524Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.992844806Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.992798898Z level=debug msg="Setting next state" handler=resultNoData + level=info ts=2024-05-29T13:44:13.99261864Z caller=remote_image_capturer.go:61 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335952laio1germanywestcentral, cloud_platform=Azure, customer_id=A216, env_id=335952, env_name=A216 Zurich Prod, env_type=prod, instance=env-335952laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=preprod" t=2024-05-29T13:44:13.992684867Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.992580911Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p3l7tu07-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992615016Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p3ib322n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992546855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335948laio1germanywestcentral, cloud_platform=Azure, customer_id=A216, env_id=335948, env_name=A216 Zurich Dev, env_type=dev, instance=env-335948laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=preprod" t=2024-05-29T13:44:13.992512515Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.992304837Z caller=client.go:80 msg="creating client for grafana instance" user=520568 addr=dns:///karpinity-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 
+ level=info component=discovery ts=2024-05-29T13:44:13.991463729Z caller=client.go:80 msg="creating client for grafana instance" user=676704 addr=dns:///jonasbroenstrup-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p3dohq0p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992322232Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.992229637Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.99159103Z caller=ruler.go:522 msg="tenant is owned by this instance" user=662913 slug=gealptsfprod groups=10 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p39iqkou-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99210614Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p39iqkou-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99206602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p39iqkou-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99205594Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p38vmarz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.992021009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=707603 slug=canoneurope t=2024-05-29T13:44:13.992032957Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=22.92668ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-p38vmarz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991990649Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p38vmarz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991958479Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p38vmarz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991947599Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335763laio1use1, cloud_platform=AWS, customer_id=C593, env_id=335763, env_name=C593_TASC_Dev, env_type=dev, instance=env-335763laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.991865759Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.991850882Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/employment-contract-drafts/{proxy+}, Stage=--" t=2024-05-29T13:44:13.991784175Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:13.99176932Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p34hlwnf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991734986Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335734laio1euc1, cloud_platform=AWS, customer_id=C802, env_id=335734, env_name=C802 Adidas LocalBI DEV, env_type=dev, instance=env-335734laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.991680487Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:13.991665294Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="team=order-management-core" + level=debug ts=2024-05-29T13:44:13.99155843Z caller=ruler.go:522 msg="tenant is owned by this instance" user=539034 slug=ilenkradgps groups=1 + level=debug ts=2024-05-29T13:44:13.991625995Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.991576384Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335691laio1euc1, cloud_platform=AWS, customer_id=C802, env_id=335691, env_name=C802 Adidas LocalBI PROD, env_type=prod, instance=env-335691laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.991527009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p32jov1p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991460864Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2ple4r2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991423983Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2ple4r2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991362313Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.991284224Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.990982125Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=659863 slug=fracek + level=info component=discovery ts=2024-05-29T13:44:13.991142826Z caller=client.go:80 msg="creating client for grafana instance" user=539447 addr=dns:///jka-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2ootwt8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99111435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2ootwt8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.991069599Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.990714622Z caller=ruler.go:522 msg="tenant is owned by this instance" user=569957 slug=hgp groups=2 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2joqnmp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990959158Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335629laio1aps2, cloud_platform=AWS, customer_id=C791, env_id=335629, env_name=C791 Wagesafe Prod, env_type=prod, instance=env-335629laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=testing" t=2024-05-29T13:44:13.990929312Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335629laio1aps2, cloud_platform=AWS, customer_id=C791, env_id=335629, env_name=C791 Wagesafe Prod, env_type=prod, instance=env-335629laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=testing" t=2024-05-29T13:44:13.990882632Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2joqnmp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990920648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2joqnmp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990885258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2gvnp83-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990724926Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.990855123Z caller=ruler.go:575 
msg="user has no rule groups, ignoring" user=495235 slug=frantchenco + level=info component=discovery ts=2024-05-29T13:44:13.990758923Z caller=client.go:80 msg="creating client for grafana instance" user=514957 addr=dns:///jdx-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2gvnp83-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990663855Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.990698322Z caller=ruler.go:522 msg="tenant is owned by this instance" user=529779 slug=foodbuddies groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2gvnp83-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990612715Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.990675186Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.99068482Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p2c0uwx1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990538884Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core" t=2024-05-29T13:44:13.990654547Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=info component=discovery ts=2024-05-29T13:44:13.99050852Z caller=client.go:80 msg="creating client for grafana instance" user=536064 addr=dns:///jcomp-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=637816 slug=kingobservatory t=2024-05-29T13:44:13.99052394Z level=debug msg="Saving alert states" count=26 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.99044422Z caller=ruler.go:522 msg="tenant is owned by this instance" user=691454 slug=habis groups=0 + logger=ngalert.state.manager user=637816 
slug=kingobservatory instance="application=stritz, env=prod2, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.990501452Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.990368619Z caller=ruler.go:522 msg="tenant is owned by this instance" user=625101 slug=finxone groups=0 + level=debug ts=2024-05-29T13:44:13.990430095Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.99039788Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.990099216Z caller=ruler.go:522 msg="tenant is owned by this instance" user=518898 slug=idrobyshev groups=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335427laio1aps2, cloud_platform=AWS, customer_id=C801, env_id=335427, env_name=C801_Nostradata_Prod, env_type=prod, instance=env-335427laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=preprod" t=2024-05-29T13:44:13.990308862Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p29h387w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990316372Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=ratko, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.990259222Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p29h387w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990240311Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.990132117Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=666019 slug=infrateqgroup + level=debug ts=2024-05-29T13:44:13.990096316Z caller=ruler.go:522 msg="tenant is owned by this instance" user=666019 slug=infrateqgroup groups=0 + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=rakdos, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.990193394Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p29h387w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.99013957Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=327842 slug=exabeam version=45 fingerprint=d00c1dbf8eaa0032 attempt=1 
now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.990120514Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.project_id=exa-cloud-staging, resource.label.subscription_id=staging-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription State:Normal Error: Results:map[] Values:map[E:{Var:E Labels:resource.label.project_id=exa-cloud-staging, resource.label.subscription_id=staging-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription Value:0xc01de0fbb8} num_undelivered_messages:{Var:num_undelivered_messages Labels:resource.label.project_id=exa-cloud-staging, resource.label.subscription_id=staging-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription Value:0xc01de0fbe0} oldest_unacked_message_age:{Var:oldest_unacked_message_age Labels:resource.label.project_id=exa-cloud-staging, resource.label.subscription_id=staging-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription Value:0xc01de0fbe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.989787087s EvaluationString:[ var='E' labels={resource.label.project_id=exa-cloud-staging, resource.label.subscription_id=staging-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription} value=0 ], [ var='num_undelivered_messages' labels={resource.label.project_id=exa-cloud-staging, resource.label.subscription_id=staging-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription} value=0 ], [ var='oldest_unacked_message_age' labels={resource.label.project_id=exa-cloud-staging, resource.label.subscription_id=staging-audit-syslog-exa-audit-syslog-svc, resource.type=pubsub_subscription} value=0 ]}]" duration=286.883147ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p27cjnkh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.990083879Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=269887 slug=blackrockdev instance="name=optengine.interactive_pfo_metrics_request" t=2024-05-29T13:44:13.990059233Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335394laio1use1, cloud_platform=AWS, customer_id=C595, env_id=335394, env_name=C595 PARALLEL UAT, env_type=qa, instance=env-335394laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.990074075Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=269887 slug=blackrockdev t=2024-05-29T13:44:13.989962725Z level=debug msg="State manager processing evaluation results" resultCount=5 + Error parsing panelUID for alert annotationruleID541dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=269887 slug=blackrockdev version=10 fingerprint=0939a7c4a986e18b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.989817479Z level=debug msg="Alert rule evaluated" results="[{Instance:name=optengine.background_optimization_request State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=optengine.background_optimization_request Value:0xc01c42b558} B:{Var:B 
Labels:name=optengine.background_optimization_request Value:0xc01c42b568} D:{Var:D Labels:name=optengine.background_optimization_request Value:0xc01c42b598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.989309183s EvaluationString:[ var='A' labels={name=optengine.background_optimization_request} value=14929 ], [ var='B' labels={name=optengine.background_optimization_request} value=1 ], [ var='D' labels={name=optengine.background_optimization_request} value=2954 ]} {Instance:name=optengine.interactive_optimization_request State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=optengine.interactive_optimization_request Value:0xc01c42b5c8} B:{Var:B Labels:name=optengine.interactive_optimization_request Value:0xc01c42b5e8} D:{Var:D Labels:name=optengine.interactive_optimization_request Value:0xc01c42b5b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.989327841s EvaluationString:[ var='A' labels={name=optengine.interactive_optimization_request} value=1491 ], [ var='B' labels={name=optengine.interactive_optimization_request} value=1 ], [ var='D' labels={name=optengine.interactive_optimization_request} value=991 ]} {Instance:name=optengine.interactive_pfo_metrics_request State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=optengine.interactive_pfo_metrics_request Value:0xc01c42b668} B:{Var:B Labels:name=optengine.interactive_pfo_metrics_request Value:0xc01c42b638} D:{Var:D Labels:name=optengine.interactive_pfo_metrics_request Value:0xc01c42b648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.989338296s EvaluationString:[ var='A' labels={name=optengine.interactive_pfo_metrics_request} value=3298 ], [ var='B' labels={name=optengine.interactive_pfo_metrics_request} value=1 ], [ var='D' labels={name=optengine.interactive_pfo_metrics_request} value=2453 ]} {Instance:name=retrieve_upload_requests_from_db State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=retrieve_upload_requests_from_db Value:0xc01c42b6a8} B:{Var:B Labels:name=retrieve_upload_requests_from_db Value:0xc01c42b6b8} D:{Var:D Labels:name=retrieve_upload_requests_from_db Value:0xc01c42b688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.989361941s EvaluationString:[ var='A' labels={name=retrieve_upload_requests_from_db} value=27503 ], [ var='B' labels={name=retrieve_upload_requests_from_db} value=1 ], [ var='D' labels={name=retrieve_upload_requests_from_db} value=14019 ]} {Instance:name=wasap.interactive_optimization_result State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:name=wasap.interactive_optimization_result Value:0xc01c42b708} B:{Var:B Labels:name=wasap.interactive_optimization_result Value:0xc01c42b6e8} D:{Var:D Labels:name=wasap.interactive_optimization_result Value:0xc01c42b6f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.989373696s EvaluationString:[ var='A' labels={name=wasap.interactive_optimization_result} value=1627 ], [ var='B' labels={name=wasap.interactive_optimization_result} value=1 ], [ var='D' labels={name=wasap.interactive_optimization_result} value=1038 ]}]" duration=305.020195ms + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=petrescue, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.989958345Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=petrescue, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.989945303Z 
level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=nuevo, env=prod2, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.989857139Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p25cuuf7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989853257Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p25cuuf7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989795426Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335354laio1use1, cloud_platform=AWS, customer_id=C595, env_id=335354, env_name=C595 PARALLEL PROD, env_type=prod, instance=env-335354laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.989801762Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=identitononcritical, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.989647243Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.989566634Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p23rsnq0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989633175Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p23rsnq0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989585214Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p23rsnq0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989551244Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=283914 slug=emmasleep instance="account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core" t=2024-05-29T13:44:13.9895714Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=identito, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.989528293Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.989436317Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:13.989518956Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="team=order-management-core"
+ level=debug ts=2024-05-29T13:44:13.989440207Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335182laio1euc1, cloud_platform=AWS, customer_id=C760, env_id=335182, env_name=C760 Lamborghini Prod New, env_type=prod, instance=env-335182laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.989427015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335182laio1euc1, cloud_platform=AWS, customer_id=C760, env_id=335182, env_name=C760 Lamborghini Prod New, env_type=prod, instance=env-335182laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.989398856Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p235zbe2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989350182Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p235zbe2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989306631Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p235zbe2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.989278431Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.989199828Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.989166949Z caller=remote_instance_store.go:51 user=169420 slug=newspring msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=farmking, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.989194493Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p1zjti0d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.98912784Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.989003077Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.989028307Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=charlie, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.989023979Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.98896631Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335118laio1euc1, cloud_platform=AWS, customer_id=C799, env_id=335118, env_name=C799DeutscheTelekom_p-prd, env_type=prod, instance=env-335118laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.988924116Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=candysolitaire, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.988909024Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.988830852Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.988770095Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=candycubesnew, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.988781401Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p1nf6kys-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.988750066Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-335071laio1aps2, cloud_platform=AWS, customer_id=C791, env_id=335071, env_name=c791_Wagsafe_DEV_New, env_type=dev, instance=env-335071laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=testing" t=2024-05-29T13:44:13.988702389Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p1nf6kys-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.988639575Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.988628141Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.988628592Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.988581161Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.988595836Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PPL-BOL-CENTROLINK-RM-RETURN-SQS" t=2024-05-29T13:44:13.988566803Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=candycrush3d, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.988576005Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.988528753Z caller=remote_image_capturer.go:54 user=283914 slug=emmasleep rule_org_id=1 rule_uid=f7a257b5-5b4b-4bb5-b8c5-1d3b84d6f862 dashboard=CNR8LzU7z2323213wrrwewr panel=16 msg="rendering alert image with grafana"
+ level=debug ts=2024-05-29T13:44:13.988486171Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=candycrush, env=prod2, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.988448704Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p1gbnmi7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.988354932Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=candycrush, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.988353087Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334975laio1euc1, cloud_platform=AWS, customer_id=C757, env_id=334975, env_name=C757 Adidas GlobalBI Dev, env_type=dev, instance=env-334975laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.988254567Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p1epziaa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.988085879Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p19skggl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.988015708Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory instance="application=blossomblastsaga, env=prod1, operator_name=KafkaEventInput" t=2024-05-29T13:44:13.988001892Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p19skggl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.987838706Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334801laio1use1, cloud_platform=AWS, customer_id=C797, env_id=334801, env_name=C797 Cox nVision PRD, env_type=prod, instance=env-334801laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.987857123Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334801laio1use1, cloud_platform=AWS, customer_id=C797, env_id=334801, env_name=C797 Cox nVision PRD, env_type=prod, instance=env-334801laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.987841502Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=637816 slug=kingobservatory t=2024-05-29T13:44:13.987687853Z level=debug msg="State manager processing evaluation results" resultCount=26
+ level=debug ts=2024-05-29T13:44:13.987600007Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p12es9rb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.987669875Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334729laioapn1, cloud_platform=AWS, customer_id=C787, env_id=334729, env_name=C787_JEOL_prod_new, env_type=prod, instance=env-334729laioapn1, job=integrations/node_exporter, region=ap-northeast-1, stage=testing" t=2024-05-29T13:44:13.987633655Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p0quqzy9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.987500173Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:13.987474455Z caller=grafana.go:247 user=289650 slug=eurostar msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=36 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p0quqzy9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.987448362Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334575laio1use1, cloud_platform=AWS, customer_id=C798, env_id=334575, env_name=C798 Payway PROD, env_type=prod, instance=env-334575laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.987374085Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p0quqzy9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.987323281Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334574laio1use1, cloud_platform=AWS, customer_id=C798, env_id=334574, env_name=C798 Payway DEV, env_type=dev, instance=env-334574laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.987183264Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p0qpzwu3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.987167269Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=283914 slug=emmasleep t=2024-05-29T13:44:13.986504099Z level=debug msg="State manager processing evaluation results" resultCount=107
+ level=debug ts=2024-05-29T13:44:13.987035265Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p0lju0b8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.986992168Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p0lju0b8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.986951187Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.986890881Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=283914 slug=emmasleep version=10 fingerprint=c95d14479df12e3c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.979429322Z level=debug msg="Alert rule evaluated" results="[{Instance:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff67d0} B:{Var:B Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff68d0} C:{Var:C Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock,
region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff6720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967695782s EvaluationString:[ var='A' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ], [ var='B' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ], [ var='C' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff6ca0} B:{Var:B Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff6d80} C:{Var:C Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core 
Value:0xc0a8ff6e68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967721939s EvaluationString:[ var='A' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=2 ], [ var='B' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=2 ], [ var='C' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7070} B:{Var:B Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7168} C:{Var:C Labels:account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7258}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967737954s EvaluationString:[ var='A' labels={account_id=169497471755, application=ecommerce-order-mediator, 
dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=28 ], [ var='B' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=28 ], [ var='C' labels={account_id=169497471755, application=ecommerce-order-mediator, dimension_QueueName=ecom-dev-ecommerce-payment-mediator-dead-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-ecommerce-payment-mediator-dead-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7440} B:{Var:B Labels:account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7540} C:{Var:C Labels:account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967749823s EvaluationString:[ var='A' labels={account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=17 ], [ var='B' 
labels={account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=17 ], [ var='C' labels={account_id=169497471755, application=fluent-event-publisher, dimension_QueueName=ecom-dev-fluent-event-publisher-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=169497471755, application=oms-customer-mediator, dimension_QueueName=ecom-dev-oms-customer-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=oms-customer-mediator, dimension_QueueName=ecom-dev-oms-customer-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7ce8} B:{Var:B Labels:account_id=169497471755, application=oms-customer-mediator, dimension_QueueName=ecom-dev-oms-customer-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7b50} C:{Var:C Labels:account_id=169497471755, application=oms-customer-mediator, dimension_QueueName=ecom-dev-oms-customer-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7c08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967762558s EvaluationString:[ var='A' labels={account_id=169497471755, application=oms-customer-mediator, dimension_QueueName=ecom-dev-oms-customer-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=3 ], [ var='B' labels={account_id=169497471755, application=oms-customer-mediator, dimension_QueueName=ecom-dev-oms-customer-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, 
region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=3 ], [ var='C' labels={account_id=169497471755, application=oms-customer-mediator, dimension_QueueName=ecom-dev-oms-customer-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7ff0} B:{Var:B Labels:account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7e80} C:{Var:C Labels:account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc0a8ff7f30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.96777573s EvaluationString:[ var='A' labels={account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=6 ], [ var='B' labels={account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=6 ], [ var='C' labels={account_id=169497471755, application=oms-fulfillment-mediator, dimension_QueueName=ecom-dev-oms-fulfillment-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, 
name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc04a99a110} B:{Var:B Labels:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc04a99a1b0} C:{Var:C Labels:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc04a99a250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967785737s EvaluationString:[ var='A' labels={account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=2 ], [ var='B' labels={account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=2 ], [ var='C' labels={account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, 
environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc04a99a3f0} B:{Var:B Labels:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc04a99a490} C:{Var:C Labels:account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core Value:0xc04a99a360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967798933s EvaluationString:[ var='A' labels={account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=27 ], [ var='B' labels={account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=27 ], [ var='C' labels={account_id=169497471755, application=oms-invoice-mediator, dimension_QueueName=ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, environment=development, job=267658-AWS-SQS-COS-Ecom-DEV-metrics, name=arn:aws:sqs:eu-central-1:169497471755:ecom-dev-oms-invoice-mediator-emails-dead-letter-queue, pagerduty_service=invoice-distributor, region=eu-central-1, scrape_job=COS-Ecom-DEV-metrics, team=order-management-core} value=1 ]} {Instance:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, 
scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99a7f0} B:{Var:B Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99a5c8} C:{Var:C Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99a738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.96781216s EvaluationString:[ var='A' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-magento-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=ecommerce-order-mediator, 
dimension_QueueName=ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99a8f8} B:{Var:B Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99aad0} C:{Var:C Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99ab70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967821682s EvaluationString:[ var='A' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=21 ], [ var='B' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=21 ], [ var='C' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-marketplace-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=1 ]} {Instance:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, 
name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99aca0} B:{Var:B Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99af00} C:{Var:C Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04a99afa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.967833275s EvaluationString:[ var='A' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=ecommerce-order-mediator, dimension_QueueName=ecom-prod-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue, pagerduty_service=order-stock, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core 
Grafana alert-rule evaluation results (continued) for job `267658-AWS-SQS-COS-Ecom-PROD-metrics`, all evaluated at `2024-05-29 13:44:10 +0000 UTC` with evaluation durations of ≈3.968 s. Every instance shares the labels `account_id=178863526580`, `region=eu-central-1`, `scrape_job=COS-Ecom-PROD-metrics`, and `team=order-management-core`; the raw dump repeats the full label set and a Go pointer value (`0xc…`) for each of the variables `A`, `B`, and `C`, which carry no information beyond the values below. `A` and `B` are the queried and reduced metric values for the queue, and `C` is the threshold condition result (1 = condition met).

| `dimension_QueueName` | application | pagerduty_service | State | A | B | C |
|---|---|---|---|---|---|---|
| ecom-prod-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue | ecommerce-order-mediator | order-stock | (printed before this excerpt) | 150 | 150 | 1 |
| ecom-prod-ecommerce-order-mediator-wholesale-orders-dead-letter-queue | ecommerce-order-mediator | order-stock | Normal | 0 | 0 | 0 |
| ecom-prod-ecommerce-payment-mediator-dead-queue | ecommerce-order-mediator | order-stock | Alerting | 15775.2 | 15775.2 | 1 |
| ecom-prod-ecommerce-send-email-dead-letter-queue | ecommerce-send-email | order-stock | Normal | 0 | 0 | 0 |

The remaining instances are the per-country ERP consumer dead-letter queues (`application=fluent-event-publisher`, `pagerduty_service=fulfilment`). Each of `ecom-prod-erp-consumer-{at,au,be,ch,de,dk-se,es,fr,hk,ie,it,nl,nz,pl,pt,se,tw,uk}-oms-dead-letter-queue`, plus `ecom-prod-erp-consumer-oms-dead-letter-queue`, evaluated to `State:Normal` with `A=0, B=0, C=0`. The dump then begins an entry for `ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue` (`State:Normal`), which is cut off mid-`EvaluationString` and continues below.
job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075506470} B:{Var:B Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075506510} C:{Var:C Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075506650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968127405s EvaluationString:[ var='A' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ 
var='B' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-erp-consumer-wholesale_uk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075506770} B:{Var:B Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc0755069a0} C:{Var:C Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075506a28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968139769s EvaluationString:[ var='A' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=14 ], [ var='B' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-dead-letter-queue, 
pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=14 ], [ var='C' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=1 ]} {Instance:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075506ea8} B:{Var:B Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507050} C:{Var:C Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075506dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968149492s EvaluationString:[ var='A' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, environment=production, 
job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507170} B:{Var:B Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507200} C:{Var:C Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507288}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968158962s EvaluationString:[ var='A' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, 
team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc0755073b0} B:{Var:B Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507440} C:{Var:C Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968169523s EvaluationString:[ var='A' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=fluent-event-publisher, 
dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507790} B:{Var:B Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507870} C:{Var:C Labels:account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968178896s EvaluationString:[ var='A' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=fluent-event-publisher, dimension_QueueName=ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-fluent-event-publisher-fscm-wholesale-perf-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=oms-carrier-mediator, dimension_QueueName=ecom-prod-oms-carrier-mediator-dead-letter-queue, 
environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-carrier-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-carrier-mediator, dimension_QueueName=ecom-prod-oms-carrier-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-carrier-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507ae0} B:{Var:B Labels:account_id=178863526580, application=oms-carrier-mediator, dimension_QueueName=ecom-prod-oms-carrier-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-carrier-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507b68} C:{Var:C Labels:account_id=178863526580, application=oms-carrier-mediator, dimension_QueueName=ecom-prod-oms-carrier-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-carrier-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507cb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.9681919s EvaluationString:[ var='A' labels={account_id=178863526580, application=oms-carrier-mediator, dimension_QueueName=ecom-prod-oms-carrier-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-carrier-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=318 ], [ var='B' labels={account_id=178863526580, application=oms-carrier-mediator, dimension_QueueName=ecom-prod-oms-carrier-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-carrier-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=318 ], [ var='C' labels={account_id=178863526580, application=oms-carrier-mediator, dimension_QueueName=ecom-prod-oms-carrier-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-carrier-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=1 ]} {Instance:account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-customer-mediator, 
dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507dc0} B:{Var:B Labels:account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507ef8} C:{Var:C Labels:account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc075507f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.96820414s EvaluationString:[ var='A' labels={account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=oms-customer-mediator, dimension_QueueName=ecom-prod-oms-customer-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-customer-mediator-dead-letter-queue, pagerduty_service=customer-distributor, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core 
Value:0xc04e8c6290} B:{Var:B Labels:account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6170} C:{Var:C Labels:account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968229604s EvaluationString:[ var='A' labels={account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=oms-email-tracker, dimension_QueueName=ecom-prod-oms-email-tracker-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-email-tracker-dead-letter-queue, pagerduty_service=Fluent, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=oms-fraud-check-service, dimension_QueueName=ecom-prod-oms-fraud-check-service-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fraud-check-service-dead-letter-queue, pagerduty_service=test, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-fraud-check-service, dimension_QueueName=ecom-prod-oms-fraud-check-service-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fraud-check-service-dead-letter-queue, pagerduty_service=test, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6520} B:{Var:B Labels:account_id=178863526580, application=oms-fraud-check-service, dimension_QueueName=ecom-prod-oms-fraud-check-service-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fraud-check-service-dead-letter-queue, pagerduty_service=test, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c65c0} C:{Var:C 
Labels:account_id=178863526580, application=oms-fraud-check-service, dimension_QueueName=ecom-prod-oms-fraud-check-service-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fraud-check-service-dead-letter-queue, pagerduty_service=test, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c63b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968240686s EvaluationString:[ var='A' labels={account_id=178863526580, application=oms-fraud-check-service, dimension_QueueName=ecom-prod-oms-fraud-check-service-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fraud-check-service-dead-letter-queue, pagerduty_service=test, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=oms-fraud-check-service, dimension_QueueName=ecom-prod-oms-fraud-check-service-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fraud-check-service-dead-letter-queue, pagerduty_service=test, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=oms-fraud-check-service, dimension_QueueName=ecom-prod-oms-fraud-check-service-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fraud-check-service-dead-letter-queue, pagerduty_service=test, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6810} B:{Var:B Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c66e0} C:{Var:C Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, 
name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968251227s EvaluationString:[ var='A' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-allocated-carriers-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6960} B:{Var:B Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6af0} C:{Var:C Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, 
team=order-management-core Value:0xc04e8c6b80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968261477s EvaluationString:[ var='A' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=27 ], [ var='B' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=27 ], [ var='C' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=1 ]} {Instance:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6cb0} B:{Var:B Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6d40} C:{Var:C Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968273269s EvaluationString:[ var='A' 
labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-fraud-check-result-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core} value=0 ]} {Instance:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c7010} B:{Var:B Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6ef0} C:{Var:C Labels:account_id=178863526580, application=oms-fulfillment-mediator, dimension_QueueName=ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, environment=production, job=267658-AWS-SQS-COS-Ecom-PROD-metrics, name=arn:aws:sqs:eu-central-1:178863526580:ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-PROD-metrics, team=order-management-core Value:0xc04e8c6f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968286173s EvaluationString:[ var='A' labels={account_id=178863526580, 
A single Grafana alert-rule evaluation (EvaluatedAt 2024-05-29 13:44:10 UTC, evaluation duration ≈3.97 s) reported per-queue state for AWS SQS dead-letter queues in region eu-central-1, scraped by job `267658-AWS-SQS-COS-Ecom-PROD-metrics` (production, account 178863526580) and job `267658-AWS-SQS-COS-Ecom-STAGE-metrics` (staging, account 797798810101). Every instance carries `team=order-management-core`, and each `name` label is the queue's SQS ARN. Query variables `A` and `B` hold identical values throughout this excerpt, and `C` is 1 for every instance reported as Alerting and 0 for every Normal one. The raw state string, condensed:

| Env | Application | Queue (`dimension_QueueName`) | PagerDuty service | State | A | B | C |
|-----|-------------|-------------------------------|-------------------|-------|---|---|---|
| production | oms-fulfillment-mediator | ecom-prod-oms-fulfillment-mediator-magento-fulfilments-dead-letter-queue | fulfilment | n/a¹ | 1 | 1 | 1 |
| production | oms-fulfillment-mediator | ecom-prod-oms-fulfillment-mediator-s3-wholesale-events-dead-letter-queue | fulfilment | Normal | 0 | 0 | 0 |
| production | oms-invoice-mediator | ecom-prod-oms-invoice-mediator-dead-letter-queue | invoice-distributor | Alerting | 28 | 28 | 1 |
| production | oms-invoice-mediator | ecom-prod-oms-invoice-mediator-emails-dead-letter-queue | invoice-distributor | Alerting | 7770 | 7770 | 1 |
| production | oms-invoice-mediator | ecom-prod-oms-invoice-mediator-file-events-dead-letter-queue | invoice-distributor | Alerting | 1628 | 1628 | 1 |
| production | oms-outstanding-demand-processor | ecom-prod-oms-outstanding-demand-processor-dead-letter-queue.fifo | order-management-core | Normal | 0 | 0 | 0 |
| production | oms-partners-integrator | ecom-prod-oms-partners-integrator-dead-letter-queue | order-management-core | Normal | 0 | 0 | 0 |
| production | oms-stock-receiver | ecom-prod-oms-stock-receiver-fulfilment-dead-letter-queue | order-stock | Alerting | 2254 | 2254 | 1 |
| production | oms-stock-receiver | ecom-prod-oms-stock-receiver-s3-stock-events-dead-letter-queue | order-stock | Normal | 0 | 0 | 0 |
| production | oms-stock-receiver | ecom-prod-oms-stock-receiver-tracking-number-updates-dead-letter-queue | order-stock | Normal | 0 | 0 | 0 |
| staging | ecommerce-order-mediator | ecom-stage-ecommerce-magento-order-mediator-dead-letter-queue | order-stock | Normal | 0 | 0 | 0 |
| staging | ecommerce-order-mediator | ecom-stage-ecommerce-marketplace-order-mediator-dead-letter-queue | order-stock | Alerting | 2 | 2 | 1 |
| staging | ecommerce-order-mediator | ecom-stage-ecommerce-order-mediator-dead-letter-queue | order-stock | Alerting | 7 | 7 | 1 |
| staging | ecommerce-order-mediator | ecom-stage-ecommerce-order-mediator-delivery-date-updates-dead-letter-queue | order-stock | Alerting | 9 | 9 | 1 |
| staging | ecommerce-order-mediator | ecom-stage-ecommerce-order-mediator-wholesale-orders-dead-letter-queue | order-stock | Normal | 0 | 0 | 0 |
| staging | ecommerce-order-mediator | ecom-stage-ecommerce-payment-mediator-dead-queue | order-stock | Alerting | 10 | 10 | 1 |
| staging | ecommerce-send-email | ecom-stage-ecommerce-send-email-dead-letter-queue | order-stock | Normal | 0 | 0 | 0 |
| staging | fluent-event-publisher | ecom-stage-erp-consumer-at-oms-dead-letter-queue | fulfilment | Normal | 0 | 0 | 0 |
| staging | fluent-event-publisher | ecom-stage-erp-consumer-au-oms-dead-letter-queue | fulfilment | Normal | 0 | 0 | 0 |
| staging | fluent-event-publisher | ecom-stage-erp-consumer-be-oms-dead-letter-queue | fulfilment | Normal | 0 | 0 | 0 |
| staging | fluent-event-publisher | ecom-stage-erp-consumer-ch-oms-dead-letter-queue | fulfilment | Normal | 0 | 0 | 0 |
| staging | fluent-event-publisher | ecom-stage-erp-consumer-de-oms-dead-letter-queue | fulfilment | Normal | 0 | 0 | 0 |
| staging | fluent-event-publisher | ecom-stage-erp-consumer-dk-se-oms-dead-letter-queue | fulfilment | Normal | 0 | 0 | 0 |

¹ The first record is truncated at the start of this excerpt, so its recorded State is not visible; only its EvaluationString values survive.
{Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-es-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-es-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3530} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-es-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d35c0} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-es-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968588289s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-es-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-es-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-es-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-es-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, 
team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3790} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d38e0} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968599556s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-fr-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-fr-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, 
pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3b48} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3c00} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3d30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.96860921s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-hk-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-hk-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ie-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ie-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3e40} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ie-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, 
name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3eb8} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ie-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc01d7d3f70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968619652s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ie-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ie-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-ie-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-ie-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-it-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-it-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-it-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-it-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc0a0} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-it-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-it-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc150} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-it-oms-dead-letter-queue, environment=staging, 
job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-it-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968631483s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-it-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-it-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-it-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-it-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-it-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-it-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc338} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc410} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968641047s EvaluationString:[ var='A' 
labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nz-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nz-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nz-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nz-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc5e8} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nz-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nz-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc690} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nz-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nz-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968664801s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nz-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nz-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, 
team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nz-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nz-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-nz-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-nz-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc880} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc930} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bc9e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968674969s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, 
team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bcbb0} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bcc58} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bcb00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968686519s EvaluationString:[ var='A' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='B' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ], [ var='C' labels={account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pl-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pl-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, 
scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core} value=0 ]} {Instance:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pt-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pt-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pt-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pt-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bcda0} B:{Var:B Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pt-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pt-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bcef0} C:{Var:C Labels:account_id=797798810101, application=fluent-event-publisher, dimension_QueueName=ecom-stage-erp-consumer-pt-oms-dead-letter-queue, environment=staging, job=267658-AWS-SQS-COS-Ecom-STAGE-metrics, name=arn:aws:sqs:eu-central-1:797798810101:ecom-stage-erp-consumer-pt-oms-dead-letter-queue, pagerduty_service=fulfilment, region=eu-central-1, scrape_job=COS-Ecom-STAGE-metrics, team=order-management-core Value:0xc04a0bcf98}] EvaluatedAt:2024-05-2 + level=debug ts=2024-05-29T13:44:13.986859547Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=667326 slug=lakovna t=2024-05-29T13:44:13.986722181Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.227986ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p0fnxkjx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.986513663Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.986594408Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.98648041Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p04hd0yh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.986303881Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.986280572Z 
caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334531laio1southeastasia, cloud_platform=Azure, customer_id=A228, env_id=334531, env_name=A228 DFS Prod, env_type=prod, instance=env-334531laio1southeastasia, job=integrations/node_exporter, region=southeastasia, stage=testing" t=2024-05-29T13:44:13.986253437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-p03ea2qq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.986182379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334522laio1eastus, cloud_platform=Azure, customer_id=A250, env_id=334522, env_name=A250 Fresh Market Prod, env_type=prod, instance=env-334522laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.986027207Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozzwpbba-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985884766Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334513laio1eus2, cloud_platform=AWS, customer_id=C788, env_id=334513, env_name=c788_Tendam_dev, env_type=dev, instance=env-334513laio1eus2, job=integrations/node_exporter, region=eu-south-2, stage=testing" t=2024-05-29T13:44:13.985837502Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozz5ttnr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985850656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozz5ttnr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985823126Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozz5ttnr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985754905Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozz5ttnr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985717925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334500laio1eastus2, cloud_platform=Azure, customer_id=A249, env_id=334500, env_name=A249 VS Services Prod 2, env_type=prod, instance=env-334500laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.985585607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozmatn40-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985503492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334500laio1eastus2, cloud_platform=Azure, customer_id=A249, env_id=334500, env_name=A249 VS Services Prod 2, env_type=prod, instance=env-334500laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.98548235Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.985370901Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334492laio1eastus2, cloud_platform=Azure, customer_id=A249, env_id=334492, env_name=A249 VS Services Dev 2, env_type=dev, instance=env-334492laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.985302923Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozk9fmrc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985344571Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334492laio1eastus2, cloud_platform=Azure, customer_id=A249, env_id=334492, env_name=A249 VS Services Dev 2, env_type=dev, 
instance=env-334492laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.98528752Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ozk9fmrc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.98529282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oz6ys7yn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.98522245Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.985175001Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oz6ys7yn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.985189239Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oz0d1053-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984971737Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oz0d1053-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984943647Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.984929091Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334484laio1aps1, cloud_platform=AWS, customer_id=C034, env_id=334484, env_name=C034 AP AAP Test Env, env_type=test, instance=env-334484laio1aps1, job=integrations/node_exporter, region=ap-southeast-1, stage=testing" t=2024-05-29T13:44:13.984930666Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, 
agent_hostname=env-334484laio1aps1, cloud_platform=AWS, customer_id=C034, env_id=334484, env_name=C034 AP AAP Test Env, env_type=test, instance=env-334484laio1aps1, job=integrations/node_exporter, region=ap-southeast-1, stage=testing" t=2024-05-29T13:44:13.984908847Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oz0d1053-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984813305Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyy3ur5w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984745645Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyy3ur5w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984708264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyy3ur5w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984635503Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.984562811Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyy3ur5w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984607013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334474laio1eastus2, cloud_platform=Azure, customer_id=A248, env_id=334474, env_name=A248 Tri-City Dev, env_type=dev, instance=env-334474laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.984521283Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.984384461Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling 
SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyy089sl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984468072Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyy089sl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984419701Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyy089sl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984387681Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334473laio1eastus2, cloud_platform=Azure, customer_id=A248, env_id=334473, env_name=A248 Tri-City Prod, env_type=prod, instance=env-334473laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.984266177Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334473laio1eastus2, cloud_platform=Azure, customer_id=A248, env_id=334473, env_name=A248 Tri-City Prod, env_type=prod, instance=env-334473laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.984247328Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyvejj5u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.984024197Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oysb5ekw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983983797Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oysb5ekw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983970127Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.983917136Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oysb5ekw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983863726Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.983849352Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oysb5ekw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983847045Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.983704415Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.754097ms
+level=debug ts=2024-05-29T13:44:13.983718306Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334465laio1eastus2, cloud_platform=Azure, customer_id=A249, env_id=334465, env_name=A249 VS Services Prod 1, env_type=prod, instance=env-334465laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.983720734Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyqgbrx3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983730484Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyn25fdp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983585213Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyn25fdp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983551052Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oyn25fdp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.983524682Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.983386843Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334461laio1euw1, cloud_platform=AWS, customer_id=C033, env_id=334461, env_name=C033 EU AAP Test Env, env_type=dev, instance=env-334461laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=testing" t=2024-05-29T13:44:13.983489437Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=27737 slug=edfmancapital t=2024-05-29T13:44:13.983408377Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.143081ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oykedn1v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.98333287Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oykedn1v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.9832862Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=698103 slug=vericast version=46 fingerprint=ae8d55c18bdb40dc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.982971217Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[C0:{Var:C Labels:datacenter=atx1, instance=vm-1a646865a5e6, project=engops-activedirectory-corp Value:0xc018262e20} C1:{Var:C Labels:datacenter=atx1, instance=vm-30b8baa76662, project=qradar-gateway Value:0xc018263b70} C10:{Var:C Labels:datacenter=da1, instance=vm-05791100a48c, project=valassis-engops-activedirectory Value:0xc018263b20} C11:{Var:C Labels:datacenter=da1, instance=vm-14082bc300c6, project=engops-hitachi-bravura-corp Value:0xc018262cd8} C12:{Var:C Labels:datacenter=da1, instance=vm-2126a742815d, project=engops-hitachi-bravura-infra Value:0xc018262ee8} C13:{Var:C Labels:datacenter=da1, instance=vm-2ca219fefa46, project=engops-awx Value:0xc0182636b0} C14:{Var:C Labels:datacenter=da1, instance=vm-32b427f59788, project=valassis-engops-sandbox Value:0xc018263900} C15:{Var:C Labels:datacenter=da1, instance=vm-37fa26717c8e, project=nessus-scanner Value:0xc018263018} C16:{Var:C Labels:datacenter=da1, instance=vm-3ca603f03f4a, project=valassis-engops-activedirectory Value:0xc0182639f8} C17:{Var:C Labels:datacenter=da1, instance=vm-48994d0e8d68, project=valassis-engops-sandbox Value:0xc0182633d0} C18:{Var:C Labels:datacenter=da1, instance=vm-4aa8079c5e39, project=engops-services-corp Value:0xc018262fc0} C19:{Var:C Labels:datacenter=da1, instance=vm-4ebd447642b2, project=engops-bastion Value:0xc018263580} C2:{Var:C Labels:datacenter=atx1, instance=vm-4cf906669fd3, project=engops-bigfix Value:0xc018263650} C20:{Var:C Labels:datacenter=da1, instance=vm-5300eaf3f208, project=engops-activedirectory-corp Value:0xc0182631d0} C21:{Var:C Labels:datacenter=da1, instance=vm-5832888e1caf, project=engops-activedirectory-corp Value:0xc018262f60} C22:{Var:C Labels:datacenter=da1, instance=vm-5a8500a657c9, project=engops-services-corp Value:0xc018263818} C23:{Var:C Labels:datacenter=da1, instance=vm-5d6dec05d5c8, project=valassis-engops-activedirectory Value:0xc018263940} C24:{Var:C Labels:datacenter=da1, instance=vm-5f20a99a5de9, project=engops-activedirectory-prod Value:0xc018263230} C25:{Var:C Labels:datacenter=da1, instance=vm-61b777ea1b12, project=engops-activedirectory-prod Value:0xc018262c38} C26:{Var:C Labels:datacenter=da1, instance=vm-7cddfd11c3f1, project=engops-activedirectory-prod Value:0xc018263a48} C27:{Var:C Labels:datacenter=da1, instance=vm-8dfdb0d48168, project=engops-activedirectory-corp Value:0xc0182630f8} C28:{Var:C Labels:datacenter=da1, instance=vm-8ed06a723235, project=qradar-gateway Value:0xc018262be0} C29:{Var:C Labels:datacenter=da1, instance=vm-9c66fb3055bf, project=engops-services-corp Value:0xc018263a98} C3:{Var:C Labels:datacenter=atx1, instance=vm-507bca8ea3da, project=engops-bigfix Value:0xc018263858} C30:{Var:C Labels:datacenter=da1, instance=vm-a50697e40038, project=engops-activedirectory-corp Value:0xc018262d48} C31:{Var:C Labels:datacenter=da1, instance=vm-bbc7d6e2f405, project=valassis-engops-activedirectory Value:0xc018263978} C32:{Var:C Labels:datacenter=da1, instance=vm-bcbd658327bb, project=engops-activedirectory-prod Value:0xc018263180} C33:{Var:C Labels:datacenter=da1, instance=vm-c4b46f470cb5, project=valassis-engops-sandbox Value:0xc018263370} C34:{Var:C Labels:datacenter=da1, instance=vm-c6f5ee141d97, project=engops-services-corp Value:0xc018263488} C35:{Var:C Labels:datacenter=da1, instance=vm-cf9f71031da4, project=engops-activedirectory-prod Value:0xc0182639c8} C36:{Var:C Labels:datacenter=da1, instance=vm-d6c3990b8c24, project=engops-services-corp Value:0xc018262d98} C37:{Var:C Labels:datacenter=da1, instance=vm-dba0296abcde, project=engops-hitachi-bravura-prod Value:0xc018263778} C38:{Var:C Labels:datacenter=da1, instance=vm-e57577eec249, project=engops-services-corp Value:0xc0182637d8} C39:{Var:C Labels:datacenter=da1, instance=vm-ffd975fbc0d8, project=engops-zscaler Value:0xc018263440} C4:{Var:C Labels:datacenter=atx1, instance=vm-77a197e7bdf2, project=engops-zscaler Value:0xc018263c20} C40:{Var:C Labels:datacenter=dc1, instance=vm-ab47911606be, project=valassis-engops-activedirectory Value:0xc018262e78} C41:{Var:C Labels:datacenter=dc1, instance=vm-cce684ca5deb, project=valassis-engops-activedirectory Value:0xc018263520} C5:{Var:C Labels:datacenter=atx1, instance=vm-82deca692da9, project=engops-activedirectory-prod Value:0xc0182635e0} C6:{Var:C Labels:datacenter=atx1, instance=vm-859e4d97a22a, project=engops-passwordstate Value:0xc018263280} C7:{Var:C Labels:datacenter=atx1, instance=vm-a349a9481e33, project=engops-services-corp Value:0xc018263bb0} C8:{Var:C Labels:datacenter=atx1, instance=vm-a35ecc45c306, project=engops-activedirectory-prod Value:0xc0182638b0} C9:{Var:C Labels:datacenter=atx1, instance=vm-ea4413ef28b4, project=engops-activedirectory-corp Value:0xc018263ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.982091893s EvaluationString:[ var='C0' metric='Value' labels={datacenter=atx1, instance=vm-1a646865a5e6, project=engops-activedirectory-corp} value=0 ], [ var='C1' metric='Value' labels={datacenter=atx1, instance=vm-30b8baa76662, project=qradar-gateway} value=0 ], [ var='C2' metric='Value' labels={datacenter=atx1, instance=vm-4cf906669fd3, project=engops-bigfix} value=0 ], [ var='C3' metric='Value' labels={datacenter=atx1, instance=vm-507bca8ea3da, project=engops-bigfix} value=0 ], [ var='C4' metric='Value' labels={datacenter=atx1, instance=vm-77a197e7bdf2, project=engops-zscaler} value=0 ], [ var='C5' metric='Value' labels={datacenter=atx1, instance=vm-82deca692da9, project=engops-activedirectory-prod} value=0 ], [ var='C6' metric='Value' labels={datacenter=atx1, instance=vm-859e4d97a22a, project=engops-passwordstate} value=0 ], [ var='C7' metric='Value' labels={datacenter=atx1, instance=vm-a349a9481e33, project=engops-services-corp} value=0 ], [ var='C8' metric='Value' labels={datacenter=atx1, instance=vm-a35ecc45c306, project=engops-activedirectory-prod} value=0 ], [ var='C9' metric='Value' labels={datacenter=atx1, instance=vm-ea4413ef28b4, project=engops-activedirectory-corp} value=0 ], [ var='C10' metric='Value' labels={datacenter=da1, instance=vm-05791100a48c, project=valassis-engops-activedirectory} value=0 ], [ var='C11' metric='Value' labels={datacenter=da1, instance=vm-14082bc300c6, project=engops-hitachi-bravura-corp} value=0 ], [ var='C12' metric='Value' labels={datacenter=da1, instance=vm-2126a742815d, project=engops-hitachi-bravura-infra} value=0 ], [ var='C13' metric='Value' labels={datacenter=da1, instance=vm-2ca219fefa46, project=engops-awx} value=0 ], [ var='C14' metric='Value' labels={datacenter=da1, instance=vm-32b427f59788, project=valassis-engops-sandbox} value=0 ], [ var='C15' metric='Value' labels={datacenter=da1, instance=vm-37fa26717c8e, project=nessus-scanner} value=0 ], [ var='C16' metric='Value' labels={datacenter=da1, instance=vm-3ca603f03f4a, project=valassis-engops-activedirectory} value=0 ], [ var='C17' metric='Value' labels={datacenter=da1, instance=vm-48994d0e8d68, project=valassis-engops-sandbox} value=0 ], [ var='C18' metric='Value' labels={datacenter=da1, instance=vm-4aa8079c5e39, project=engops-services-corp} value=0 ], [ var='C19' metric='Value' labels={datacenter=da1, instance=vm-4ebd447642b2, project=engops-bastion} value=0 ], [ var='C20' metric='Value' labels={datacenter=da1, instance=vm-5300eaf3f208, project=engops-activedirectory-corp} value=0 ], [ var='C21' metric='Value' labels={datacenter=da1, instance=vm-5832888e1caf, project=engops-activedirectory-corp} value=0 ], [ var='C22' metric='Value' labels={datacenter=da1, instance=vm-5a8500a657c9, project=engops-services-corp} value=0 ], [ var='C23' metric='Value' labels={datacenter=da1, instance=vm-5d6dec05d5c8, project=valassis-engops-activedirectory} value=0 ], [ var='C24' metric='Value' labels={datacenter=da1, instance=vm-5f20a99a5de9, project=engops-activedirectory-prod} value=0 ], [ var='C25' metric='Value' labels={datacenter=da1, instance=vm-61b777ea1b12, project=engops-activedirectory-prod} value=0 ], [ var='C26' metric='Value' labels={datacenter=da1, instance=vm-7cddfd11c3f1, project=engops-activedirectory-prod} value=0 ], [ var='C27' metric='Value' labels={datacenter=da1, instance=vm-8dfdb0d48168, project=engops-activedirectory-corp} value=0 ], [ var='C28' metric='Value' labels={datacenter=da1, instance=vm-8ed06a723235, project=qradar-gateway} value=0 ], [ var='C29' metric='Value' labels={datacenter=da1, instance=vm-9c66fb3055bf, project=engops-services-corp} value=0 ], [ var='C30' metric='Value' labels={datacenter=da1, instance=vm-a50697e40038, project=engops-activedirectory-corp} value=0 ], [ var='C31' metric='Value' labels={datacenter=da1, instance=vm-bbc7d6e2f405, project=valassis-engops-activedirectory} value=0 ], [ var='C32' metric='Value' labels={datacenter=da1, instance=vm-bcbd658327bb, project=engops-activedirectory-prod} value=0 ], [ var='C33' metric='Value' labels={datacenter=da1, instance=vm-c4b46f470cb5, project=valassis-engops-sandbox} value=0 ], [ var='C34' metric='Value' labels={datacenter=da1, instance=vm-c6f5ee141d97, project=engops-services-corp} value=0 ], [ var='C35' metric='Value' labels={datacenter=da1, instance=vm-cf9f71031da4, project=engops-activedirectory-prod} value=0 ], [ var='C36' metric='Value' labels={datacenter=da1, instance=vm-d6c3990b8c24, project=engops-services-corp} value=0 ], [ var='C37' metric='Value' labels={datacenter=da1, instance=vm-dba0296abcde, project=engops-hitachi-bravura-prod} value=0 ], [ var='C38' metric='Value' labels={datacenter=da1, instance=vm-e57577eec249, project=engops-services-corp} value=0 ], [ var='C39' metric='Value' labels={datacenter=da1, instance=vm-ffd975fbc0d8, project=engops-zscaler} value=0 ], [ var='C40' metric='Value' labels={datacenter=dc1, instance=vm-ab47911606be, project=valassis-engops-activedirectory} value=0 ], [ var='C41' metric='Value' labels={datacenter=dc1, instance=vm-cce684ca5deb, project=valassis-engops-activedirectory} value=0 ]}]" duration=574.79691ms
+level=debug ts=2024-05-29T13:44:13.98301549Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.983025561Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334436laio1use1, cloud_platform=AWS, customer_id=C439, env_id=334436, env_name=C439 BBU PROD Parallel, env_type=prod, instance=env-334436laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.983004598Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oydzd2f7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.982942606Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334410laio1use1, cloud_platform=AWS, customer_id=C794, env_id=334410, env_name=C794 Amica AWS DEV, env_type=dev, instance=env-334410laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.982808242Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334410laio1use1, cloud_platform=AWS, customer_id=C794, env_id=334410, env_name=C794 Amica AWS DEV, env_type=dev, instance=env-334410laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.982794498Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oydp2s4u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.982653833Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.982746089Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.982681214Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oydp2s4u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.982509572Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334370laio1use1, cloud_platform=AWS, customer_id=C795, env_id=334370, env_name=c795 Omega DEV, env_type=dev, instance=env-334370laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.982423508Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oy88sal6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.98232251Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oy75347w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.982248539Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.982299871Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oy75347w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.982232859Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oy75347w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.982129098Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.982062335Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oy5vfpi5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.982033947Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oy5vfpi5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981989676Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334365laio1use1, cloud_platform=AWS, customer_id=C796, env_id=334365, env_name=C796 Enova Parallel Prod, env_type=prod, instance=env-334365laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.981886415Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-cbf3d7a86d7642b9, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:13.981877557Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334365laio1use1, cloud_platform=AWS, customer_id=C796, env_id=334365, env_name=C796 Enova Parallel Prod, env_type=prod, instance=env-334365laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.981867674Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.981821347Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxlgdg04-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981806624Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.981795021Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.98165838Z caller=remote_instance_store.go:51 user=351895 slug=abacusworks msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-7cff9907c51546da, persistentvolumeclaim=main-repo1" t=2024-05-29T13:44:13.981704837Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-7cff9907c51546da, persistentvolumeclaim=main-repo1" t=2024-05-29T13:44:13.981692887Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxlgdg04-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981710924Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxlgdg04-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981682323Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-1f17abc4faec4b9e, persistentvolumeclaim=data-redpanda-0" t=2024-05-29T13:44:13.981637069Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-1937dcc534294c1d, persistentvolumeclaim=data-rabbitmq-0" t=2024-05-29T13:44:13.981600048Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334350laio1eastus, cloud_platform=Azure, customer_id=A245, env_id=334350, env_name=A245 Arlington CPS, env_type=prod, instance=env-334350laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.981570383Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-01e43321d01a42a1, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:13.981534063Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-01e43321d01a42a1, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:13.981521844Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=538355 slug=flogic t=2024-05-29T13:44:13.981490372Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.696767ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxivq0ek-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981511301Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxivq0ek-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981418031Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.981429185Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.834376ms
+logger=ngalert.state.manager.persist user=107179 slug=ibaudata t=2024-05-29T13:44:13.981415729Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxf91t5t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981269389Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=107179 slug=ibaudata instance= t=2024-05-29T13:44:13.981270448Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T14:14:10Z next_ends_at=2024-05-29T14:24:10Z
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxdmmgi0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.981100477Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:13.981142997Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:13.981026807Z caller=remote_instance_store.go:51 user=686395 slug=containerfoundation msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxctgj2z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.980797034Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.980803369Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334340laio1canadacentral, cloud_platform=Azure, customer_id=A246, env_id=334340, env_name=A246 Longo Brothers PROD, env_type=prod, instance=env-334340laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=preprod" t=2024-05-29T13:44:13.980718826Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334340laio1canadacentral, cloud_platform=Azure, customer_id=A246, env_id=334340, env_name=A246 Longo Brothers PROD, env_type=prod, instance=env-334340laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=preprod" t=2024-05-29T13:44:13.980697429Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.980363883Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxcdpidj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.98039977Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxal0rk8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.98034439Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oxal0rk8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.980302169Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ox0s3xtd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.980065877Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ox0s3xtd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.980001486Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owxzzs8y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979883905Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owxzzs8y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979799744Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owwds4uw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979748393Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owwds4uw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979711753Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owvro3oh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979546311Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=691855 slug=chainlake instance="instance=lokilogs-cax11-lokilogs" t=2024-05-29T13:44:13.979858275Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:13.979846814Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.118.165:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=vYfesgP4k alerts=1
+logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:13.979833097Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.719666ms
+logger=ngalert.state.manager.persist user=438185 slug=nodeinfra t=2024-05-29T13:44:13.979782083Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=49.207982ms
+logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.979795662Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=389a805d01eb58a9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.979704907Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.979314071s EvaluationString:}]" duration=13.917783ms
+logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.979715013Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=74.614967ms
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334336laio1eastus, cloud_platform=Azure, customer_id=A244, env_id=334336, env_name=a244_Gilbane_PROD, env_type=prod, instance=env-334336laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.979756573Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.979696974Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.97964023Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owvro3oh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979480521Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.979417556Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.979338326Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owrvvmc5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979187138Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.979159429Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owrvvmc5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979122887Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owrvvmc5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.979093017Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.978925905Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owqanjz7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978985886Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owqanjz7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978902385Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owpf98io-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978714243Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.978604475Z caller=remote_instance_store.go:51 user=242310 slug=suzy msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owpf98io-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978602612Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334314laio1use1, cloud_platform=AWS, customer_id=C793, env_id=334314, env_name=C793 Meredith Prod, env_type=prod, instance=env-334314laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.978613551Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owgdsyt4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978533051Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owc975pp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978326409Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.978355265Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owc975pp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978263008Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owc2wtyw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978119897Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owc2wtyw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.978064646Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.978008683Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owb1tyxw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.977955905Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owb1tyxw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.977937845Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.977964234Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.977948823Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334243laio1use1, cloud_platform=AWS, customer_id=C674, env_id=334243, env_name=C674 Amica Prod, env_type=prod, instance=env-334243laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.977869827Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-owb1tyxw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.977781673Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ow9g11vv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.977720493Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.977571019Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ow9g11vv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.977611971Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=472647 slug=planet instance="deployment=ordersv2-live-default-04-director" t=2024-05-29T13:44:13.977561548Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ow6r9jso-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97749538Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ow6r9jso-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97746239Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=472647 slug=planet instance="deployment=ordersv2-live-default-03-director" t=2024-05-29T13:44:13.97750622Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=472647 slug=planet instance="deployment=ordersv2-live-alt-director" t=2024-05-29T13:44:13.977445234Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ow6r9jso-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.9774183Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ow6r9jso-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.977321179Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=472647 slug=planet instance="deployment=ordersv2-live-alt-03" t=2024-05-29T13:44:13.97730836Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-334221laio1eastus2, cloud_platform=Azure, customer_id=A241, env_id=334221, env_name=A241 Farm Credit Prod, env_type=prod, instance=env-334221laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.977272705Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovrtcjb3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.977245518Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.977198198Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=472647 slug=planet instance="deployment=ordersv2-control-plane-director" t=2024-05-29T13:44:13.977123305Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=472647 slug=planet instance="deployment=ordersv2-control-plane-director" t=2024-05-29T13:44:13.977115058Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=472647 slug=planet instance="deployment=ordersv2-control-plane-api" t=2024-05-29T13:44:13.977085093Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovfiqasl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976954635Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovfiqasl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976921454Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.976912024Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovfiqasl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976829463Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovcdxwb3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976789413Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=120649 slug=ware t=2024-05-29T13:44:13.976764336Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovcdxwb3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976697782Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:13.976632541Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:13.976576916Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovcdxwb3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976621581Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovbb7rir-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976585851Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.976464835Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-333971laio1use1, cloud_platform=AWS, customer_id=C786, env_id=333971, env_name=C786 AIG Dev, env_type=dev, instance=env-333971laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.976338512Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ovb7x595-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976267808Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.976024639Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ov4lwosx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.976013085Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:13.97602919Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:13.976003336Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=320778 slug=omegaai version=1 fingerprint=2c2927a11df6410c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.975822903Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.975480837s EvaluationString:}]" duration=364.484099ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ov0j2byd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.975900234Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ov0j2byd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.975832673Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.97564259Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ouqxrxqc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.975574011Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.975528921Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ouqxrxqc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97552804Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.975505192Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.572666ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ouqxrxqc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97551634Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.975487474Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-333764laio1eastus2, cloud_platform=Azure, customer_id=A241, env_id=333764, env_name=a241 Farm Credit Dev, env_type=prod, instance=env-333764laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.97538866Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.975268859Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=111839 slug=last9 t=2024-05-29T13:44:13.975087544Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=111839 slug=last9 instance="datasource_uid=oTGJrwcVz, ref_id=query" previous_handler=resultNoData t=2024-05-29T13:44:13.975065934Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=111839 slug=last9 instance="datasource_uid=oTGJrwcVz, ref_id=query" previous_handler=resultNoData t=2024-05-29T13:44:13.975049659Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.975202214Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ounzsst8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.975238407Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ounzsst8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.975192407Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.97507583Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=403369 slug=clearsaletechlabs t=2024-05-29T13:44:13.974972042Z level=debug msg="Saving alert states" count=20 max_state_save_concurrency=1
+logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=TENB" t=2024-05-29T13:44:13.974943858Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=PUT api/applications/{id:int}" t=2024-05-29T13:44:13.974923591Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=PUT api/applications/{id:int}" t=2024-05-29T13:44:13.974911297Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=PUT" t=2024-05-29T13:44:13.974883856Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-333525laio1use1, cloud_platform=AWS, customer_id=C694, env_id=333525, env_name=C694_COX_Mobility_UAT, env_type=qa, instance=env-333525laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.974812828Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=POST api/profiler/variables" t=2024-05-29T13:44:13.974803853Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=POST api/profiler/variables" t=2024-05-29T13:44:13.97479175Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.974682186Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=POST" t=2024-05-29T13:44:13.974626206Z level=debug msg="Keeping state" state=Normal
+
logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.974502978Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.974586696Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.974414679Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.974596334Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=HEAD" t=2024-05-29T13:44:13.974490939Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.974553728Z caller=remote_instance_store.go:51 user=667326 slug=lakovna msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.974404189Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=667326 slug=lakovna instance= t=2024-05-29T13:44:13.974457886Z level=warn msg="Failed to take an image" dashboard=d655a547-d823-4496-952e-3d997ba6eacd panel=2 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oubckdhm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.974484749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=bdb14587c45e3e01 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.974349476Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[A0:{Var:A Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.974071761s EvaluationString:[ var='A0' metric='NoData' labels={} value=null ]}]" duration=138.464781ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oubckdhm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.974457429Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ou3j0k2z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.974297997Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=GET api/applications" t=2024-05-29T13:44:13.974295119Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=GET api/applications" t=2024-05-29T13:44:13.974287641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=GET" t=2024-05-29T13:44:13.974265504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:13.974233669Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z + logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=DEBUG" t=2024-05-29T13:44:13.974233233Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=403369 slug=clearsaletechlabs instance="route=DEBUG" t=2024-05-29T13:44:13.974216583Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=403369 slug=clearsaletechlabs version=33 fingerprint=82349e13b14db961 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.973789203Z level=debug msg="Alert rule evaluated" results="[{Instance:route=DEBUG State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=DEBUG Value:0xc024aaae20} C:{Var:C Labels:route=DEBUG Value:0xc024aaae38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972839674s EvaluationString:[ var='B' labels={route=DEBUG} value=1.77863632503e-312 ], [ var='C' labels={route=DEBUG} value=0 ]} {Instance:route=GET State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=GET Value:0xc024aaae68} C:{Var:C Labels:route=GET Value:0xc024aaae58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972854088s EvaluationString:[ var='B' labels={route=GET} value=0.030990573229585535 ], [ var='C' labels={route=GET} value=0 ]} {Instance:route=GET api/applications State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=GET api/applications Value:0xc024aaaec8} C:{Var:C Labels:route=GET api/applications Value:0xc024aaae88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972871914s EvaluationString:[ var='B' labels={route=GET api/applications} value=0.0019809774354080205 ], [ var='C' labels={route=GET api/applications} value=0 ]} {Instance:route=GET api/check State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=GET api/check Value:0xc024aaaf28} C:{Var:C Labels:route=GET api/check Value:0xc024aaaf48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.97287896s EvaluationString:[ var='B' labels={route=GET api/check} value=2.026231647216921 ], [ var='C' labels={route=GET api/check} value=0 ]} {Instance:route=GET api/devices State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=GET api/devices Value:0xc024aaaf98} C:{Var:C Labels:route=GET api/devices Value:0xc024aaafb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.97288748s EvaluationString:[ var='B' labels={route=GET api/devices} value=0.00463311984872982 ], [ var='C' labels={route=GET api/devices} value=0 ]} {Instance:route=GET api/profiler/variables State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=GET api/profiler/variables Value:0xc024aaaff8} C:{Var:C Labels:route=GET api/profiler/variables Value:0xc024aab008}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.97289472s EvaluationString:[ var='B' labels={route=GET api/profiler/variables} value=0.005703324996569338 ], [ var='C' labels={route=GET 
api/profiler/variables} value=0 ]} {Instance:route=GET api/profiler/variables/{sessionid} State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=GET api/profiler/variables/{sessionid} Value:0xc024aab048} C:{Var:C Labels:route=GET api/profiler/variables/{sessionid} Value:0xc024aab058}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972901023s EvaluationString:[ var='B' labels={route=GET api/profiler/variables/{sessionid}} value=0.0038841117280285758 ], [ var='C' labels={route=GET api/profiler/variables/{sessionid}} value=0 ]} {Instance:route=HEAD State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=HEAD Value:0xc024aab080} C:{Var:C Labels:route=HEAD Value:0xc024aab098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972907224s EvaluationString:[ var='B' labels={route=HEAD} value=1.9384258433050825 ], [ var='C' labels={route=HEAD} value=0 ]} {Instance:route=OPTIONS State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=OPTIONS Value:0xc024aab0c8} C:{Var:C Labels:route=OPTIONS Value:0xc024aab0f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972912668s EvaluationString:[ var='B' labels={route=OPTIONS} value=1.77863632503e-312 ], [ var='C' labels={route=OPTIONS} value=0 ]} {Instance:route=PATCH State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=PATCH Value:0xc024aab148} C:{Var:C Labels:route=PATCH Value:0xc024aab120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972919125s EvaluationString:[ var='B' labels={route=PATCH} value=1.77192448984e-312 ], [ var='C' labels={route=PATCH} value=0 ]} {Instance:route=POST State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=POST Value:0xc024aab170} C:{Var:C Labels:route=POST Value:0xc024aab188}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972924507s EvaluationString:[ var='B' labels={route=POST} value=0.030834076930350455 ], [ var='C' labels={route=POST} value=0 ]} {Instance:route=POST api/applications State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=POST api/applications Value:0xc024aab1a8} C:{Var:C Labels:route=POST api/applications Value:0xc024aab1b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972929914s EvaluationString:[ var='B' labels={route=POST api/applications} value=0.00026278366132310635 ], [ var='C' labels={route=POST api/applications} value=0 ]} {Instance:route=POST api/devices State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=POST api/devices Value:0xc024aab1e8} C:{Var:C Labels:route=POST api/devices Value:0xc024aab208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972936747s EvaluationString:[ var='B' labels={route=POST api/devices} value=19.767179049186083 ], [ var='C' labels={route=POST api/devices} value=0 ]} {Instance:route=POST api/devices/customer State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=POST api/devices/customer Value:0xc024aab248} C:{Var:C Labels:route=POST api/devices/customer Value:0xc024aab258}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972942147s EvaluationString:[ var='B' labels={route=POST api/devices/customer} value=0.055790703310666594 ], [ var='C' labels={route=POST api/devices/customer} value=0 ]} {Instance:route=POST api/profiler/device/generate State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=POST api/profiler/device/generate Value:0xc024aab278} C:{Var:C Labels:route=POST api/profiler/device/generate Value:0xc024aab288}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.97294744s EvaluationString:[ var='B' labels={route=POST api/profiler/device/generate} value=0.2662713818999411 ], [ var='C' labels={route=POST api/profiler/device/generate} value=0 ]} {Instance:route=POST api/profiler/variables State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=POST api/profiler/variables Value:0xc024aab2b8} C:{Var:C Labels:route=POST api/profiler/variables Value:0xc024aab2a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972952729s EvaluationString:[ var='B' labels={route=POST api/profiler/variables} value=71.65929707355937 ], [ var='C' labels={route=POST api/profiler/variables} value=0 ]} {Instance:route=POST api/profiler/variables/errors State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=POST api/profiler/variables/errors Value:0xc024aab2d8} C:{Var:C Labels:route=POST api/profiler/variables/errors Value:0xc024aab2e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972961012s EvaluationString:[ var='B' labels={route=POST api/profiler/variables/errors} value=0.21699622552428827 ], [ var='C' labels={route=POST api/profiler/variables/errors} value=0 ]} {Instance:route=PUT State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=PUT Value:0xc024aab318} C:{Var:C Labels:route=PUT Value:0xc024aab308}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972968534s EvaluationString:[ var='B' labels={route=PUT} value=0.001926005779461206 ], [ var='C' labels={route=PUT} value=0 ]} {Instance:route=PUT api/applications/{id:int} State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=PUT api/applications/{id:int} Value:0xc024aab348} C:{Var:C Labels:route=PUT api/applications/{id:int} Value:0xc024aab358}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972974456s EvaluationString:[ var='B' labels={route=PUT api/applications/{id:int}} value=1.77863632503e-312 ], [ var='C' labels={route=PUT api/applications/{id:int}} value=0 ]} {Instance:route=TENB State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:route=TENB Value:0xc024aab3a8} C:{Var:C Labels:route=TENB Value:0xc024aab390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.972980095s EvaluationString:[ var='B' labels={route=TENB} value=1.77192448984e-312 ], [ var='C' labels={route=TENB} value=0 ]}]" duration=205.624726ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ou3j0k2z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.974171916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=87780 slug=zencloudandhosting version=1 fingerprint=712f6888e917baf6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.973983528Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.973708624s EvaluationString:}]" duration=198.613425ms + logger=ngalert.state.manager.persist user=336467 slug=deepset t=2024-05-29T13:44:13.973990685Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.485257ms + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ou0hnxcz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.974092195Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.973997124Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.973901552Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.97394091Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.973876677Z caller=remote_alert_sender.go:94 user=350551 slug=loopme host=loopme-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.183.121:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=wpl1UaRVk alerts=1 + level=debug ts=2024-05-29T13:44:13.973821301Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-333270laio1use1, cloud_platform=AWS, customer_id=C785, env_id=333270, env_name=C785 Freddie Mac Prod, env_type=prod, instance=env-333270laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.973792436Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otznsc3v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973721901Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otznsc3v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973684231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otud08b5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973631161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-otud08b5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97354767Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.973529591Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otud08b5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973509149Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otud08b5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973493679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otsobamh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973451439Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.973373757Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=667326 slug=lakovna t=2024-05-29T13:44:13.973454116Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otsobamh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973353878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otsobamh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973344318Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.973386468Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:13.973370687Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=7.972525ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otioky47-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973282717Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.973246352Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.973237832Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otioky47-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973227566Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.973125398Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otioky47-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973156036Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oti7ucwq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.973090585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-333077laio1euw1, cloud_platform=AWS, customer_id=C472, env_id=333077, env_name=C472_PROD, env_type=prod, instance=env-333077laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.973041212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-333077laio1euw1, cloud_platform=AWS, customer_id=C472, env_id=333077, env_name=C472_PROD, env_type=prod, instance=env-333077laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.973013867Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-oti7ucwq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972941463Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oth0x7c3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972912023Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oth0x7c3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972825222Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="cluster=prd-us-east-1-siren-devctl" t=2024-05-29T13:44:13.972931429Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332973laio1use1, cloud_platform=AWS, customer_id=C433, env_id=332973, env_name=C433 ABC Liquors DEV, env_type=dev, instance=env-332973laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.972807491Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otcf5eif-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97264674Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otcf5eif-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97262042Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-otcf5eif-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972564489Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ot9y171c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972504669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332946laio1eastus, cloud_platform=Azure, customer_id=A236, env_id=332946, env_name=a236_Eagle, env_type=prod, instance=env-332946laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.97248673Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ot4sip2x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972249386Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ot4sip2x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972220746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332898laio1use1, cloud_platform=AWS, customer_id=C766, env_id=332898, env_name=C766 PURE PROD, env_type=prod, instance=env-332898laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.972217672Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ot4sip2x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972148715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ot4sip2x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972119145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ot47beyd-termination-metadata-pv, 
phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.972030954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ot1apaj7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.971823322Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osxkyuva-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.971713631Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332805laio1centralus, cloud_platform=Azure, customer_id=A239, env_id=332805, env_name=a239_partner_prod_Dr, env_type=prod, instance=env-332805laio1centralus, job=integrations/node_exporter, region=centralus, stage=preprod" t=2024-05-29T13:44:13.972005653Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osufrkc5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.971420788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osufrkc5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.971395967Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osrs19cr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.971337687Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:13.971869947Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332713laio1apn1, cloud_platform=AWS, customer_id=C679, env_id=332713, 
env_name=C679_Parallel_Prod, env_type=prod, instance=env-332713laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:13.971793766Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osmiwrrd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.971162155Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osmiwrrd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.971133605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle t=2024-05-29T13:44:13.971762605Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osjda3j2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970945953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osjda3j2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970911522Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osjda3j2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970747541Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osdef46i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.97069911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osant1j7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970543349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-osant1j7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970515488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332706laio1use1, cloud_platform=AWS, customer_id=C538, env_id=332706, env_name=C538 HBC DEV, env_type=dev, instance=env-332706laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.97157738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332706laio1use1, cloud_platform=AWS, customer_id=C538, env_id=332706, env_name=C538 HBC DEV, env_type=dev, instance=env-332706laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.971557047Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-os9glzsd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970464468Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-os9glzsd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970437287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332705laio1use1, cloud_platform=AWS, customer_id=C538, env_id=332705, env_name=C538 HBC QA, env_type=qa, instance=env-332705laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.971383672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-os5h4ert-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970139774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-orwzsr6q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.970031813Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-orung6pd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969847541Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-orulicak-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969784421Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-orgnpgp3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969641889Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-orgnpgp3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969591549Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=528849 slug=bitvavo instance= t=2024-05-29T13:44:13.971108201Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-orgnpgp3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969576989Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.971162838Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oqsevq0q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969423877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oqsevq0q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969384447Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oqsevq0q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969369167Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oqqd4c96-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969315906Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.971026404Z caller=remote_image_capturer.go:61 user=467258 slug=neonprod rule_org_id=1 rule_uid=c1031582-c519-4912-95be-54692e4dd0cf dashboard=a5e73d29-4fe9-4b0b-b1ad-fec87f20d487 panel=93 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oqbzubyu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.969076154Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.970894021Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332430laio1centralus, cloud_platform=Azure, customer_id=A239, env_id=332430, env_name=A239 BF InternalAnalytics, env_type=prod, instance=env-332430laio1centralus, job=integrations/node_exporter, region=centralus, stage=preprod" t=2024-05-29T13:44:13.970848125Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332430laio1centralus, 
cloud_platform=Azure, customer_id=A239, env_id=332430, env_name=A239 BF InternalAnalytics, env_type=prod, instance=env-332430laio1centralus, job=integrations/node_exporter, region=centralus, stage=preprod" t=2024-05-29T13:44:13.970819971Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.970743029Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.970518362Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.970388895Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.464017ms + level=debug ts=2024-05-29T13:44:13.970266293Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=467258 slug=neonprod instance="neon_region=eu-west-1" t=2024-05-29T13:44:13.97020058Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.970127279Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.97018861Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.970175289Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.970170396Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=27737 slug=edfmancapital version=2 fingerprint=a4ef158933195685 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.970023694Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.969751223s EvaluationString:}]" duration=21.988057ms + logger=ngalert.state.manager user=327842 slug=exabeam t=2024-05-29T13:44:13.970146914Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332061laiouse1, cloud_platform=AWS, customer_id=C736, env_id=332061, env_name=C736 EDU_Zagreb, env_type=prod, instance=env-332061laiouse1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.970025611Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.969951046Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=467258 slug=neonprod t=2024-05-29T13:44:13.969984853Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.scheduler user=467258 slug=neonprod version=49 fingerprint=0ad73b36c00c9d4d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.969894242Z level=debug msg="Alert rule evaluated" results="[{Instance:neon_region=eu-west-1 State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:neon_region=eu-west-1 Value:0xc0354a9598} B:{Var:B 
Labels:neon_region=eu-west-1 Value:0xc0354a9880} C:{Var:C Labels:neon_region=eu-west-1 Value:0xc0354a9888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968917916s EvaluationString:[ var='A' labels={neon_region=eu-west-1} value=1 ], [ var='B' labels={neon_region=eu-west-1} value=1 ], [ var='C' labels={neon_region=eu-west-1} value=1 ]} {Instance:neon_region=us-east-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:neon_region=us-east-2 Value:0xc0354a9a10} B:{Var:B Labels:neon_region=us-east-2 Value:0xc0354a9a18} C:{Var:C Labels:neon_region=us-east-2 Value:0xc0354a9d00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968932653s EvaluationString:[ var='A' labels={neon_region=us-east-2} value=0 ], [ var='B' labels={neon_region=us-east-2} value=0 ], [ var='C' labels={neon_region=us-east-2} value=0 ]}]" duration=116.118339ms + level=debug ts=2024-05-29T13:44:13.969805538Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332005laio1apn1, cloud_platform=AWS, customer_id=C679, env_id=332005, env_name=C679_Parallel_Dev, env_type=dev, instance=env-332005laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:13.969765432Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260115 slug=agri instance= t=2024-05-29T13:44:13.969546556Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-332002laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=332002, env_name=A164_CTTI_PARALLEL_DEV, env_type=dev, instance=env-332002laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=testing" t=2024-05-29T13:44:13.969495348Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.969444993Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance="resourceName=HOTEL-POOL" t=2024-05-29T13:44:13.96941521Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=396586 slug=opengov instance="resourceName=BRAVO-POOL" t=2024-05-29T13:44:13.969301741Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.969092656Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.969143949Z caller=remote_instance_store.go:51 user=707603 slug=canoneurope msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.969083573Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=396586 slug=opengov version=342 fingerprint=9b1d9b565fcbd387 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.968945928Z level=debug msg="Alert rule evaluated" results="[{Instance:resourceName=DELTA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resourceName=DELTA-POOL Value:0xc0466b4410} C:{Var:C Labels:resourceName=DELTA-POOL Value:0xc0466b4418}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968385142s EvaluationString:[ var='B' labels={resourceName=DELTA-POOL} value=0 ], [ var='C' labels={resourceName=DELTA-POOL} value=0 ]} {Instance:resourceName=ALPHA-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resourceName=ALPHA-POOL Value:0xc0466b4608} C:{Var:C Labels:resourceName=ALPHA-POOL Value:0xc0466b4600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968397448s EvaluationString:[ var='B' labels={resourceName=ALPHA-POOL} value=0 ], [ var='C' labels={resourceName=ALPHA-POOL} value=0 ]} {Instance:resourceName=BRAVO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resourceName=BRAVO-POOL Value:0xc0466b47f8} C:{Var:C Labels:resourceName=BRAVO-POOL Value:0xc0466b47f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968403595s EvaluationString:[ var='B' labels={resourceName=BRAVO-POOL} value=0 ], [ var='C' labels={resourceName=BRAVO-POOL} value=0 ]} {Instance:resourceName=GOLF-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resourceName=GOLF-POOL Value:0xc0466b49b0} C:{Var:C Labels:resourceName=GOLF-POOL Value:0xc0466b49b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968409505s EvaluationString:[ var='B' labels={resourceName=GOLF-POOL} value=0 ], [ var='C' labels={resourceName=GOLF-POOL} value=0 ]} {Instance:resourceName=FOXTROT-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resourceName=FOXTROT-POOL Value:0xc0466b4b28} C:{Var:C Labels:resourceName=FOXTROT-POOL Value:0xc0466b4e50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.96841675s EvaluationString:[ var='B' labels={resourceName=FOXTROT-POOL} value=0 ], [ var='C' labels={resourceName=FOXTROT-POOL} value=0 ]} {Instance:resourceName=HOTEL-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resourceName=HOTEL-POOL Value:0xc0466b5140} C:{Var:C Labels:resourceName=HOTEL-POOL Value:0xc0466b5148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968422798s EvaluationString:[ var='B' labels={resourceName=HOTEL-POOL} value=0 ], [ var='C' labels={resourceName=HOTEL-POOL} value=0 ]} {Instance:resourceName=ECHO-POOL State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resourceName=ECHO-POOL Value:0xc0466b5438} C:{Var:C Labels:resourceName=ECHO-POOL Value:0xc0466b5430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968430403s EvaluationString:[ var='B' labels={resourceName=ECHO-POOL} value=0 ], [ var='C' labels={resourceName=ECHO-POOL} value=0 ]}]" duration=138.436155ms + logger=ngalert.state.manager user=707603 slug=canoneurope instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.969058335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=707603 slug=canoneurope version=1 fingerprint=c676d37c1e747811 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.968969811Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.968582826s EvaluationString:}]" duration=8.290294ms + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:13.96897797Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.55977ms + level=debug ts=2024-05-29T13:44:13.968907952Z caller=remote_instance_store.go:51 user=350037 slug=morpho 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oq5mpg1d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.968806141Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331841laio1use1, cloud_platform=AWS, customer_id=C462, env_id=331841, env_name=C462 Prod Parallel, env_type=prod, instance=env-331841laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.96874432Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oq4hmy0b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.968552358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opumly9m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.968342016Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331727laio1euw1, cloud_platform=AWS, customer_id=C613, env_id=331727, env_name=C613_BricoDepot_Prod_u12, env_type=prod, instance=env-331727laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.968326288Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opumly9m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.968226575Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opumly9m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.968196884Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opiqm4o6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.968167394Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.968149566Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opiqm4o6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.968039513Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opgdnqx1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.967983722Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opgdnqx1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.967925952Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.968055516Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opgdnqx1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.967877801Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331724laio1euw1, cloud_platform=AWS, customer_id=C613, env_id=331724, env_name=C613_DEV_2021U12, env_type=dev, instance=env-331724laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.968074361Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:13.967918264Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.967845314Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.967791357Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331714laio1usw2, cloud_platform=AWS, customer_id=C772, env_id=331714, env_name=C772 Boyd DEV, env_type=dev, instance=env-331714laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.967840637Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:13.967794535Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opea389y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.967654899Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.967560525Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331576laio1usw1, cloud_platform=AWS, customer_id=C537, env_id=331576, env_name=C537_PacSun_PROD_Parallel, env_type=prod, instance=env-331576laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=testing" t=2024-05-29T13:44:13.967645192Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opcw33sa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.967492437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opbzet8r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.967266095Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.967220582Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opbcugyp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.967200744Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331501laio1use1, cloud_platform=AWS, customer_id=C462, 
env_id=331501, env_name=C462 FED DEV Parallel, env_type=dev, instance=env-331501laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.967135437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331501laio1use1, cloud_platform=AWS, customer_id=C462, env_id=331501, env_name=C462 FED DEV Parallel, env_type=dev, instance=env-331501laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.967116332Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.967073648Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opauc8mn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966987902Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-opauc8mn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966918061Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331383laio1use1, cloud_platform=AWS, customer_id=C439, env_id=331383, env_name=C439 BBU DEV, env_type=dev, instance=env-331383laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.966939186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-op9us04e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966726549Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.966855726Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-op9us04e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966672369Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.966757868Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + 
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-op9us04e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966610698Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-op9us04e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966596808Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ooxa4dq2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966545068Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=687021 slug=heviai t=2024-05-29T13:44:13.966495142Z level=debug msg="Saving alert states" count=20 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.966488832Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.781241ms
+ level=debug ts=2024-05-29T13:44:13.966399712Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331008laio1use1, cloud_platform=AWS, customer_id=C462, env_id=331008, env_name=C462 DEV Parallel, env_type=dev, instance=env-331008laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.966358869Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oovk792l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966339185Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oovk792l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966284785Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oovk792l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.966199254Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=tekirdaguni, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.966231741Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.96617294Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-331007laio1use1, cloud_platform=AWS, customer_id=C462, env_id=331007, env_name=C462 DEMO Parallel, env_type=demo, instance=env-331007laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.966188827Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:13.966136841Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.026798ms
+ level=debug ts=2024-05-29T13:44:13.966125745Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.966056029Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.965977889Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oogmgolc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.965952631Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-330929laiouse1, cloud_platform=AWS, customer_id=C780, env_id=330929, env_name=C780 Quipux PROD, env_type=prod, instance=env-330929laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.965945145Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.965927044Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.965813738Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oogmgolc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.96580493Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.965699031Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.96572682Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=okmeydani1, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.965697937Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=kocaeli, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.965599837Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oo5a8gno-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.965538747Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oo5a8gno-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.965504707Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oo5a8gno-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.965476247Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=ilhanvarank, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.965530336Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=166137 slug=teletracking t=2024-05-29T13:44:13.965457233Z level=debug msg="Deleting alert states" count=1
+ level=debug ts=2024-05-29T13:44:13.965368131Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:13.965394233Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=guven, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.965376535Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=656459 slug=activeport instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:13.965382713Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=656459 slug=activeport t=2024-05-29T13:44:13.965339723Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=ftr, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.965281435Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=ftr, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.965271135Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onxeuc01-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.965143673Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onxeuc01-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.965115543Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=bayindir, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.965102234Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-330672laio1aps1, cloud_platform=AWS, customer_id=C782, env_id=330672, env_name=C782_Smart_Axiata_PROD, env_type=prod, instance=env-330672laio1aps1, job=integrations/node_exporter, region=ap-southeast-1, stage=live" t=2024-05-29T13:44:13.96505811Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onxeuc01-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.965025822Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166137 slug=teletracking t=2024-05-29T13:44:13.965039118Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onxeuc01-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964983532Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.965040727Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=amerikan, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.964876632Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=652809 slug=glassnode t=2024-05-29T13:44:13.964819125Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=687021 slug=heviai instance="__name__=failed_equipments_count, instance=acibadem-1, job=pacs_and_equipments_status" t=2024-05-29T13:44:13.964777232Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onvs53xs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964742299Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=652809 slug=glassnode instance="namespace=glassnode, pod=prod-eth-exporter-2-0, source_env=cryptorado" t=2024-05-29T13:44:13.964738427Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.964709145Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onh5sou0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964689529Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=687021 slug=heviai version=10 fingerprint=bf1c62a52175d2ac attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.964357429Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=failed_equipments_count, instance=acibadem-1, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=acibadem-1, job=pacs_and_equipments_status Value:0xc029951d60} C:{Var:C Labels:__name__=failed_equipments_count, instance=acibadem-1, job=pacs_and_equipments_status Value:0xc029951d90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962329743s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=acibadem-1, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=acibadem-1, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=amerikan, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=amerikan, job=pacs_and_equipments_status Value:0xc029951e18} C:{Var:C Labels:__name__=failed_equipments_count, instance=amerikan, job=pacs_and_equipments_status Value:0xc029951de0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962355343s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=amerikan, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=amerikan, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=antalyaeah, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=antalyaeah, job=pacs_and_equipments_status Value:0xc029951ea8} C:{Var:C Labels:__name__=failed_equipments_count, instance=antalyaeah, job=pacs_and_equipments_status Value:0xc029951ed8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962364543s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=antalyaeah, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=antalyaeah, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=bayindir, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=bayindir, job=pacs_and_equipments_status Value:0xc029951f58} C:{Var:C Labels:__name__=failed_equipments_count, instance=bayindir, job=pacs_and_equipments_status Value:0xc029951f90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962389344s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=bayindir, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=bayindir, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=cerrahpasa1, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=cerrahpasa1, job=pacs_and_equipments_status Value:0xc00cd898b0} C:{Var:C Labels:__name__=failed_equipments_count, instance=cerrahpasa1, job=pacs_and_equipments_status Value:0xc00cd89960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962470244s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=cerrahpasa1, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=cerrahpasa1, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=ftr, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=ftr, job=pacs_and_equipments_status Value:0xc00cd899e8} C:{Var:C Labels:__name__=failed_equipments_count, instance=ftr, job=pacs_and_equipments_status Value:0xc00cd89a10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962485344s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=ftr, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=ftr, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=guven, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=guven, job=pacs_and_equipments_status Value:0xc00cd89a78} C:{Var:C Labels:__name__=failed_equipments_count, instance=guven, job=pacs_and_equipments_status Value:0xc00cd89aa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962493144s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=guven, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=guven, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=hacettepe, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=hacettepe, job=pacs_and_equipments_status Value:0xc00cd89b18} C:{Var:C Labels:__name__=failed_equipments_count, instance=hacettepe, job=pacs_and_equipments_status Value:0xc00cd89b60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962501644s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=hacettepe, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=hacettepe, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=ilhanvarank, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=ilhanvarank, job=pacs_and_equipments_status Value:0xc00cd89be0} C:{Var:C Labels:__name__=failed_equipments_count, instance=ilhanvarank, job=pacs_and_equipments_status Value:0xc00cd89c20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962509844s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=ilhanvarank, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=ilhanvarank, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=kocaeli, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=kocaeli, job=pacs_and_equipments_status Value:0xc00cd89c80} C:{Var:C Labels:__name__=failed_equipments_count, instance=kocaeli, job=pacs_and_equipments_status Value:0xc00cd89cb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962517744s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=kocaeli, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=kocaeli, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=okmeydani1, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=okmeydani1, job=pacs_and_equipments_status Value:0xc00cd89d30} C:{Var:C Labels:__name__=failed_equipments_count, instance=okmeydani1, job=pacs_and_equipments_status Value:0xc00cd89d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962527044s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=okmeydani1, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=okmeydani1, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=osman, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=osman, job=pacs_and_equipments_status Value:0xc00cd89dd8} C:{Var:C Labels:__name__=failed_equipments_count, instance=osman, job=pacs_and_equipments_status Value:0xc00cd89df8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962535444s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=osman, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=osman, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=permission, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=permission, job=pacs_and_equipments_status Value:0xc00cd89e78} C:{Var:C Labels:__name__=failed_equipments_count, instance=permission, job=pacs_and_equipments_status Value:0xc00cd89f20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962543144s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=permission, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=permission, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=permissiontest, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=permissiontest, job=pacs_and_equipments_status Value:0xc03522c060} C:{Var:C Labels:__name__=failed_equipments_count, instance=permissiontest, job=pacs_and_equipments_status Value:0xc03522c0d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962552444s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=permissiontest, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=permissiontest, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=samatya_1, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=samatya_1, job=pacs_and_equipments_status Value:0xc03522c170} C:{Var:C Labels:__name__=failed_equipments_count, instance=samatya_1, job=pacs_and_equipments_status Value:0xc03522c1c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962568245s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=samatya_1, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=samatya_1, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=sislietfal1, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=sislietfal1, job=pacs_and_equipments_status Value:0xc03522c248} C:{Var:C Labels:__name__=failed_equipments_count, instance=sislietfal1, job=pacs_and_equipments_status Value:0xc03522c270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962578745s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=sislietfal1, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=sislietfal1, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=sivas, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=sivas, job=pacs_and_equipments_status Value:0xc03522c2f8} C:{Var:C Labels:__name__=failed_equipments_count, instance=sivas, job=pacs_and_equipments_status Value:0xc03522c8b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962587145s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=sivas, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=sivas, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=tekirdaguni, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=tekirdaguni, job=pacs_and_equipments_status Value:0xc03522c928} C:{Var:C Labels:__name__=failed_equipments_count, instance=tekirdaguni, job=pacs_and_equipments_status Value:0xc03522ca50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962595145s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=tekirdaguni, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=tekirdaguni, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=trabzon, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=trabzon, job=pacs_and_equipments_status Value:0xc03522cad0} C:{Var:C Labels:__name__=failed_equipments_count, instance=trabzon, job=pacs_and_equipments_status Value:0xc03522caf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962603545s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=trabzon, job=pacs_and_equipments_status} value=-1 ], [ var='C' labels={__name__=failed_equipments_count, instance=trabzon, job=pacs_and_equipments_status} value=0 ]} {Instance:__name__=failed_equipments_count, instance=trakya1, job=pacs_and_equipments_status State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=failed_equipments_count, instance=trakya1, job=pacs_and_equipments_status Value:0xc03522cbb8} C:{Var:C Labels:__name__=failed_equipments_count, instance=trakya1, job=pacs_and_equipments_status Value:0xc03522cb70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.962614345s EvaluationString:[ var='A' labels={__name__=failed_equipments_count, instance=trakya1, job=pacs_and_equipments_status} value=0 ], [ var='C' labels={__name__=failed_equipments_count, instance=trakya1, job=pacs_and_equipments_status} value=0 ]}]" duration=35.213911ms
+ level=debug ts=2024-05-29T13:44:13.964652983Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onh5sou0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964617088Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onen03l5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964480076Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onen03l5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964410526Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-onen03l5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964339895Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-on39lnuk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964233894Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.96418118Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.43006ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-on39lnuk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964163493Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:13.964092081Z caller=remote_alert_sender.go:94 user=656459 slug=activeport host=activeport-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.20.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fa60419c-7385-440e-900a-38ef9adf17df alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-on2lkcsk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.964064302Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-on2lkcsk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.963926181Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=731546 slug=liderbci t=2024-05-29T13:44:13.963855601Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.023111ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-330301laio1cac1, cloud_platform=AWS, customer_id=C589, env_id=330301, env_name=C589_COX_Canada_PROD, env_type=prod, instance=env-330301laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.963870887Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-330301laio1cac1, cloud_platform=AWS, customer_id=C589, env_id=330301, env_name=C589_COX_Canada_PROD, env_type=prod, instance=env-330301laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.96385396Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omzxiix2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.963799289Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=125436 slug=caura instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.963635962Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=125436 slug=caura t=2024-05-29T13:44:13.963608884Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.963591814Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omzc27xv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.963511156Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omwr26k1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.963344105Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omuljk2v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.963211743Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.963088942Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omoo8kxs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.963078002Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omoo8kxs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.963046242Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omoo8kxs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.962956571Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omoo8kxs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.96288202Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omnni3ol-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.962825529Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-330242laio1use1, cloud_platform=AWS, customer_id=C781, env_id=330242, env_name=C781_CON_AIBI, env_type=prod, instance=env-330242laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.962868037Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-330242laio1use1, cloud_platform=AWS, customer_id=C781, env_id=330242, env_name=C781_CON_AIBI, env_type=prod, instance=env-330242laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.962849645Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.962834972Z caller=remote_instance_store.go:51 user=538355 slug=flogic msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.962771171Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=538355 slug=flogic t=2024-05-29T13:44:13.962789978Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omnni3ol-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.962720588Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538355 slug=flogic instance="account_id=641264638977, dimension_DBInstanceIdentifier=solamame-uematsu-report-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:solamame-uematsu-report-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" t=2024-05-29T13:44:13.962775964Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-omnni3ol-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.962678268Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-330040laio1use1, cloud_platform=AWS, customer_id=C487, env_id=330040, env_name=C487 Pfizer DEV, env_type=dev, instance=env-330040laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.962602643Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-om3nn62s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.962534966Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-om3nn62s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.962470376Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-om3nn62s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics"
t=2024-05-29T13:44:13.962353635Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-om0ihdl0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.962133212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=174016 slug=journalstaging t=2024-05-29T13:44:13.962078282Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olutbpm2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961958361Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=174016 slug=journalstaging instance="datasource_uid=bYQmLgyGz, ref_id=A" t=2024-05-29T13:44:13.962024116Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.961919866Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.961856873Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.961889222Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.961872604Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.961788536Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ollaxpkh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961689598Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ollaxpkh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961587547Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.961702992Z caller=remote_instance_store.go:51 user=542095 slug=intelligencefusion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=542095 slug=intelligencefusion instance="RuleName=--" t=2024-05-29T13:44:13.961612258Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:13.961578337Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olkqnian-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961453575Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olkqnian-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961407885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329967laio1euc1, cloud_platform=AWS, customer_id=C590, env_id=329967, env_name=C590_PROD_PARALLEL_U12, env_type=prod, instance=env-329967laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.96148822Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329967laio1euc1, cloud_platform=AWS, customer_id=C590, env_id=329967, env_name=C590_PROD_PARALLEL_U12, env_type=prod, instance=env-329967laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.96147264Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.961306332Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olhbjrig-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961192763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olhbjrig-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961117842Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="Cluster Name=sandbox-v2, Consumer Group=ledger-account-consumer-v1, Topic=processing.accounts.v1" t=2024-05-29T13:44:13.961059114Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329779laio1use1, cloud_platform=AWS, 
customer_id=C707, env_id=329779, env_name=C707 TacoBell PROD, env_type=prod, instance=env-329779laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.961086661Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olhbjrig-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.961084722Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.960758883Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.961003831Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:13.960855914Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329775laio1use1, cloud_platform=AWS, customer_id=C707, env_id=329775, env_name=C707 TacoBell DEV, env_type=dev, instance=env-329775laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.960937134Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olbirzaw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960757478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329755laio1usw1, cloud_platform=AWS, customer_id=C652, env_id=329755, env_name=C652 Gilead DEV, env_type=dev, instance=env-329755laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:13.960765905Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329755laio1usw1, cloud_platform=AWS, customer_id=C652, env_id=329755, env_name=C652 Gilead DEV, env_type=dev, instance=env-329755laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:13.960752648Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-olbirzaw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960694658Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.960630691Z caller=remote_instance_store.go:51 
user=703825 slug=andrewbauman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ola6s7o8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960567126Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.960570854Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.960500764Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ol7fjzdd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960366024Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.960396404Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329625laioeastus, cloud_platform=Azure, customer_id=A235, env_id=329625, env_name=A235 Parana Seguros PROD, env_type=prod, instance=env-329625laioeastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.960405645Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.960291109Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ol7fjzdd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960287663Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ol6y1hne-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960189342Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ol6y1hne-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.960122322Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ol6y1hne-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960054591Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=350551 slug=loopme version=4 fingerprint=5ca51b59b205fedc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.960028721Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=0eCCEONVk, ref_id=A,C,D,E,F State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.959686853s EvaluationString:}]" duration=790.265539ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okzr05w5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.960022141Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okzr05w5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.95999269Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329501laio1usw2, cloud_platform=AWS, customer_id=C753, env_id=329501, env_name=C753 COX DMS+ QA, env_type=qa, instance=env-329501laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.9599888Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okzr05w5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.95994785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okzr05w5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.95991773Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy 
instance="__name__=mstr_status_message_kafka, agent_hostname=env-329500laio1usw2, cloud_platform=AWS, customer_id=C753, env_id=329500, env_name=C753 COX DMS+ DEV, env_type=dev, instance=env-329500laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.959791669Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.959806815Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.959654246Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oky1j27w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959731358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oky1j27w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959616607Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okx4xe2q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959565156Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okx4xe2q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959489775Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.959595861Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okx4xe2q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959440675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-okwh4wg9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959352954Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okwh4wg9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959212752Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.959580437Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okw2615n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.959064161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okuo0jbu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.958865089Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okrfr86u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.958603016Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329476laio1euc1, cloud_platform=AWS, customer_id=C752, env_id=329476, env_name=C752_Groupama_Dev, env_type=dev, instance=env-329476laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.959438465Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okrfr86u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.958510925Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-okp9l85m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.958358474Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okbc5gkd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.958079411Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okbc5gkd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.958066401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-okbc5gkd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.95801339Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.959325308Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ok4t7ujq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957816448Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.959267002Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.767333ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ok4t7ujq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957682017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ok4t7ujq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957672107Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojy968rc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957641736Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojy968rc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957589156Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojy968rc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957527645Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojxuxmy8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957360473Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojxuxmy8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957347013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojxuxmy8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957304493Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojtxs1gi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957231282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojtxs1gi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957217032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojtxs1gi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.957121801Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojnmtw1t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956970579Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojn3lco3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956816348Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojn3lco3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956806418Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojn3lco3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956779067Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329415laioeuc1, cloud_platform=AWS, customer_id=C775, env_id=329415, env_name=C775 CBR Prod, env_type=prod, instance=env-329415laioeuc1, job=integrations/node_exporter, region=eu-central-1, stage=testing" t=2024-05-29T13:44:13.959124951Z level=debug msg="Setting 
next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojn3lco3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956768467Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojejlxb7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956651436Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojejlxb7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956624056Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojejlxb7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956613596Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojc87cq7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956583265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ojc87cq7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956495815Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oj8kkqk8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956466274Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oj8kkqk8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956386933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oj8kkqk8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956376623Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oj6u12rc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956349383Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oj6u12rc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956300703Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oiwl408a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.956225592Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.958782561Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.958676629Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329397laio1usw2, cloud_platform=AWS, customer_id=C772, env_id=329397, env_name=C772 Boyd QA, env_type=qa, instance=env-329397laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.95878382Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-oijwm0eg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955996209Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oijwm0eg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955967419Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329395laio1usw2, cloud_platform=AWS, customer_id=C774, env_id=329395, env_name=c774_Cheesecake_dev1, env_type=dev, instance=env-329395laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.958636551Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oigx7gkg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955844078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oigx7gkg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955806227Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oic1ok3e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955736017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oic1ok3e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955726617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oic1ok3e-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955687236Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oib74hp1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955650516Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.95847515Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oib74hp1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955585335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohzfpm7n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955548175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohzfpm7n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955537255Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohzfpm7n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955507314Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329393laio1usw2, cloud_platform=AWS, customer_id=C774, env_id=329393, env_name=c774_Cheesecake_Prod, env_type=prod, instance=env-329393laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.958458946Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ohpw152c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955383773Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohpw152c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955342683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohlla6ip-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955301082Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohlla6ip-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955274202Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.958332067Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohlla6ip-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955225952Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329365laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329365, env_name=A234 BF Partner Prod, env_type=prod, instance=env-329365laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.9582755Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329365laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329365, env_name=A234 BF Partner Prod, env_type=prod, instance=env-329365laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.958262996Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.958222764Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ohejjhog-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.955198331Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oha5zvbh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.95508126Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.957899863Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.957773249Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329362laio1eastus2, cloud_platform=Azure, customer_id=A234, env_id=329362, env_name=A234 BF Internal Pre-Prod, env_type=qa, instance=env-329362laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.957807902Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.957224829Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329335laiouse1, cloud_platform=AWS, customer_id=C736, env_id=329335, env_name=C736 HS HeilbronnParallel, env_type=prod, instance=env-329335laiouse1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.957249678Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.957211635Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.957206046Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.95676992Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.956567072Z caller=remote_instance_store.go:51 user=336467 slug=deepset msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=336467 slug=deepset instance= t=2024-05-29T13:44:13.95647311Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=336467 slug=deepset instance= t=2024-05-29T13:44:13.956459218Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.scheduler user=336467 slug=deepset version=5 fingerprint=47c6f0a8de53f82f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.95633973Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.122634ms + level=debug ts=2024-05-29T13:44:13.956463785Z 
caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance" + level=error ts=2024-05-29T13:44:13.956282244Z caller=remote_rule_evaluator.go:110 user=336467 slug=deepset msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:13.956404122Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:13.95639301Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329319laiouse1, cloud_platform=AWS, customer_id=C736, env_id=329319, env_name=C736_EDU_Denver_Parallel, env_type=prod, instance=env-329319laiouse1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.956306935Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.956188813Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:13.95609863Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.956009903Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.956023947Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329293laio1use1, cloud_platform=AWS, customer_id=C014, env_id=329293, env_name=SEC_U12_PenTest, env_type=prod, instance=env-329293laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.955989169Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.955929652Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.955876356Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.95570435Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329291laio1apn1, cloud_platform=AWS, customer_id=C767, env_id=329291, env_name=C767 MEGMILK Prod, env_type=prod, instance=env-329291laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=testing" t=2024-05-29T13:44:13.955775563Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.955728085Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.955587711Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329276laio1eastus2, cloud_platform=Azure, customer_id=A226, env_id=329276, env_name=A226 Big Lots Prod, env_type=prod, instance=env-329276laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" 
t=2024-05-29T13:44:13.955518939Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.95542126Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329246laio1euw1, cloud_platform=AWS, customer_id=C473, env_id=329246, env_name=C473_Avon_Dev_2021U11, env_type=dev, instance=env-329246laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.955012044Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.954791183Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329238laio1use1, cloud_platform=AWS, customer_id=C667, env_id=329238, env_name=C667 AMVETS PROD, env_type=prod, instance=env-329238laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.954844682Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329238laio1use1, cloud_platform=AWS, customer_id=C667, env_id=329238, env_name=C667 AMVETS PROD, env_type=prod, instance=env-329238laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.954829133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.954714327Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=30.635335ms + level=debug ts=2024-05-29T13:44:13.954715772Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.954707272Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oh2e5fwh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.954685896Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oh1tqi1k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.954630545Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.954654698Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:13.954631527Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.954630915Z caller=remote_instance_store.go:51 user=400599 slug=unionai 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datacenter=us-west-2" t=2024-05-29T13:44:13.954607044Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.954602845Z caller=remote_instance_store.go:51 user=438185 slug=nodeinfra msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.954564472Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:13.954530202Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=55.542995ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oh1tqi1k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.954482314Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:13.954474568Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oh192k6c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.954365503Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:13.954323831Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329056laioeuw1, cloud_platform=AWS, customer_id=C765, env_id=329056, env_name=c765_vp_eu, env_type=prod, instance=env-329056laioeuw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.954206178Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.954197583Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oh0m2ial-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.954188511Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.954199533Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oh0m2ial-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.95409759Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329053laio1eastus, cloud_platform=Azure, customer_id=A231, env_id=329053, env_name=A231 Ann Taylor PROD, env_type=prod, instance=env-329053laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.954023861Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogzwuzsr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.954019299Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.953880601Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogzwuzsr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.953902428Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogzwuzsr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.953838687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=524410 slug=syso t=2024-05-29T13:44:13.953791075Z level=debug msg="Saving alert states done" count=7 max_state_save_concurrency=1 duration=231.090592ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329052laio1eastus, cloud_platform=Azure, customer_id=A231, env_id=329052, env_name=A231 Ann Taylor DEV, env_type=dev, instance=env-329052laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.953777431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogvfqgvs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.953711336Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogvfqgvs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.953557864Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogr15jtr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.953460023Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.953350946Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-329012laio1eastus, cloud_platform=Azure, customer_id=A232, env_id=329012, env_name=A232_Lane_Bryant_DEV, env_type=dev, instance=env-329012laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.953389314Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.953276688Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogpq4j4p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.953257241Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.953201502Z caller=remote_instance_store.go:51 user=217320 slug=workpath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=217320 slug=workpath instance= t=2024-05-29T13:44:13.953151869Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogpq4j4p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.95311454Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:13.952926676Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.656366ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328970laio1sae1, cloud_platform=AWS, customer_id=C764, env_id=328970, env_name=C764 Prodemge Prod, env_type=prod, instance=env-328970laio1sae1, job=integrations/node_exporter, region=sa-east-1, stage=live" t=2024-05-29T13:44:13.953013134Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.952922117Z 
caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328950laio1use1, cloud_platform=AWS, customer_id=C750, env_id=328950, env_name=c750_Centene_Test, env_type=test, instance=env-328950laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.952860268Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogesyyqx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.952865177Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogesyyqx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.952814577Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.952538185Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.952514314Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ogbi5s3l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.952613114Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-og8gfd94-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.952494173Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-og8gfd94-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.952483903Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-og8gfd94-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.952400172Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328747laio1usw2, cloud_platform=AWS, customer_id=C556, env_id=328747, env_name=C556_G3_Dev_U11, env_type=dev, instance=env-328747laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:13.952038068Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=320778 slug=omegaai version=1 fingerprint=26cd9140fc1d1f48 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.951911493Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.951546286s EvaluationString:}]" duration=1.565599394s + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.951897772Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oforzdcm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951898737Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=35fc4821f67ad350 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.951802828Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.951599148s EvaluationString:}]" duration=400.046444ms + logger=ngalert.state.manager.persist user=731546 slug=liderbci t=2024-05-29T13:44:13.95183176Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=731546 slug=liderbci t=2024-05-29T13:44:13.951745539Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oforzdcm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951827526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328740laio1use1, cloud_platform=AWS, customer_id=C519, env_id=328740, env_name=C519 Sanofi QA, env_type=qa, instance=env-328740laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.951834084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=731546 slug=liderbci version=32 fingerprint=76d7bca659e479e9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.951629818Z level=debug msg="Alert rule evaluated" 
results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc041e077f8} C:{Var:C Labels: Value:0xc041e078e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.951097764s EvaluationString:[ var='A' labels={} value=-1 ], [ var='C' labels={} value=0 ]}]" duration=18.622708ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oforzdcm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951768056Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oforzdcm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951752486Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.951626885Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.951638407Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofnyipbs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951566804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofnyipbs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951537083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofkj2886-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951493123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofkj2886-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951409262Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.951413366Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.833582ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofdwp72h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.9512293Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofdwp72h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951152509Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofdwp72h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.951118419Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofd8s75g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.950942497Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofd8s75g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.950901527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofd8s75g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.950735535Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:13.950734034Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=861995 slug=umasalud 
t=2024-05-29T13:44:13.950634501Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.950390564Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328393laio1eastus, cloud_platform=Azure, customer_id=A229, env_id=328393, env_name=A229 Sompo QA, env_type=qa, instance=env-328393laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.950430352Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofb7ww6h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.950410882Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofb7ww6h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.950356521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofb7ww6h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.950322181Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.950336197Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:13.950305594Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=473762 slug=intentiq t=2024-05-29T13:44:13.950222318Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=34.355102ms + level=debug ts=2024-05-29T13:44:13.95025986Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + level=debug component=discovery ts=2024-05-29T13:44:13.950185505Z caller=retry.go:58 user=826949 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=2 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofb2n95y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.950090198Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofb2n95y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.950054598Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328323laio1eastus, cloud_platform=Azure, customer_id=A229, env_id=328323, env_name=A229 Sompo DEV, env_type=dev, instance=env-328323laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.949988246Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofan7we1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.949979397Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.949809873Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofan7we1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.949798145Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.949754643Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ofa7w9ir-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.949736855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.949748471Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.949723915Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.949704502Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=a4e3afdc71869579 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.949574796Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: 
Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.949332191s EvaluationString:}]" duration=180.475019ms + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=74d12fdaa4d02fab attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.949521246Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.949116245s EvaluationString:}]" duration=194.023874ms + logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.949400438Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oez8assy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.949324641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.949366469Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oeyd9mgd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.9492665Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oeyd9mgd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.949123678Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328294laio1eastus, cloud_platform=Azure, customer_id=A227, env_id=328294, env_name=A227 - Talbots Prod, env_type=prod, instance=env-328294laio1eastus, job=integrations/node_exporter, region=eastus, stage=preprod" t=2024-05-29T13:44:13.949123316Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.948855047Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.948818894Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oewgb22h-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.948890356Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328249laio1euc1, cloud_platform=AWS, customer_id=C560, env_id=328249, env_name=C560 C&A 01 EVAL, env_type=prod, instance=env-328249laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.948785325Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.948769713Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + level=debug ts=2024-05-29T13:44:13.948688657Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.948726861Z caller=remote_image_capturer.go:33 user=183214 slug=vectorizedio rule_org_id=1 rule_uid=cdiofhamiw5xcb msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-328241laio1euw1, cloud_platform=AWS, customer_id=C434, env_id=328241, env_name=C434_AVORIS_DEV, env_type=dev, instance=env-328241laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.948637512Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.948626759Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=PARIS Query" t=2024-05-29T13:44:13.948623745Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.948502234Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oepw0get-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.948542033Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true" t=2024-05-29T13:44:13.948533368Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oepw0get-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.948510992Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:13.94849639Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=26.963016ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oepjr3za-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.948383301Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=334045 slug=aireye instance= t=2024-05-29T13:44:13.948449029Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oepjr3za-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.948352141Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=183214 slug=vectorizedio version=1 fingerprint=889ec8f4a87989e2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.948130101Z level=debug msg="Alert rule evaluated" results="[{Instance:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af00b0} B:{Var:B Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af01e0} C:{Var:C Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc02c093840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.947448996s EvaluationString:[ var='A' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=0 ], [ var='B' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, 
redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=0 ], [ var='C' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-6nmhd, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=1 ]} {Instance:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-bdlqv, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-bdlqv, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af05c0} B:{Var:B Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-bdlqv, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af0720} C:{Var:C Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-bdlqv, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af0890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.947471079s EvaluationString:[ var='A' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-bdlqv, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=0 ], [ var='B' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-bdlqv, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=0 ], [ var='C' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-bdlqv, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=1 ]} {Instance:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-f8qkf, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-f8qkf, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af0c40} B:{Var:B Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, 
namespace=monitoring, pod=logging-fluent-bit-f8qkf, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af0d80} C:{Var:C Labels:container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-f8qkf, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true Value:0xc0d2af0ed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.947484898s EvaluationString:[ var='A' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-f8qkf, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=0 ], [ var='B' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-f8qkf, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=0 ], [ var='C' labels={container=kube-state-metrics, effect=NoSchedule, endpoint=http, instance=10.1.0.79:8080, job=kube-state-metrics, key=redpanda-node, namespace=monitoring, pod=logging-fluent-bit-f8qkf, provider=aws, redpanda_id=ci0c2f8k30vsi89l4v1g, service=prometheus-kube-state-metrics, value=true} value=1 ]}]" duration=44.327535ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oedglxhl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.948186909Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.948100502Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.948073699Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.948098249Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oedglxhl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.948014997Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327850laio1use1, cloud_platform=AWS, customer_id=C651, env_id=327850, env_name=C651 Dorinka DEV, env_type=dev, instance=env-327850laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.947992274Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.947939171Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + 
+ logger=ngalert.state.manager.persist user=119453 slug=edisonlearning t=2024-05-29T13:44:13.947995338Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=119453 slug=edisonlearning instance= t=2024-05-29T13:44:13.94798146Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=119453 slug=edisonlearning instance= t=2024-05-29T13:44:13.947969903Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.94795983Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe46nklm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.947908606Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.947883506Z caller=remote_instance_store.go:51 user=527202 slug=lnrsusinsurancedev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.947792868Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe46nklm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.947839865Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327817laio1use1, cloud_platform=AWS, customer_id=C651, env_id=327817, env_name=C651_Dorinka_Prod, env_type=prod, instance=env-327817laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.9477715Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=729654 slug=bmsmonitoring t=2024-05-29T13:44:13.947780013Z level=debug msg="Saving alert states done" count=9 max_state_save_concurrency=1 duration=125.007937ms
+ logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev t=2024-05-29T13:44:13.94777238Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.947679131Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.947699927Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.369125ms
+ level=debug ts=2024-05-29T13:44:13.94771236Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe3qfzag-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.947731174Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:13.947723629Z level=debug msg="Saving alert states done" count=13 max_state_save_concurrency=1 duration=255.700435ms
+ level=debug ts=2024-05-29T13:44:13.947619563Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe3qfzag-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.947671884Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe3pjwti-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.947508682Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.947479291Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe3pjwti-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.947396051Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe0vqa76-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.94732633Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oe0vqa76-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.94730153Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327816laio1euw1, cloud_platform=AWS, customer_id=C584, env_id=327816, env_name=C584_FeverLabs_Prod, env_type=prod, instance=env-327816laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.947199584Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.947098302Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odstmy3r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.947013877Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odstmy3r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.946973866Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odr2vgg6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.946881815Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odr2vgg6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.946725804Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odmkvlki-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.946600353Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.946296993Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odj9spos-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.946293959Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327789laio1use1, cloud_platform=AWS, customer_id=C557, env_id=327789, env_name=C557 AF PROD, env_type=prod, instance=env-327789laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.946240649Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oddpye4e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.946103938Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.946147083Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.946124041Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.946113294Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odaskdt2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945989756Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odaskdt2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945974306Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327739laio1use1, cloud_platform=AWS, customer_id=C762, env_id=327739, env_name=C762 Lightning Dev, env_type=dev, instance=env-327739laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.946054414Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-odaskdt2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945930306Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.945952362Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=746601 slug=columbusm3 t=2024-05-29T13:44:13.94583764Z level=debug msg="Skip rule evaluation because it is paused"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-od89t8ie-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945851645Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-od89t8ie-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945822465Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.945648746Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.945578877Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.945482073Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327717laio1euc1, cloud_platform=AWS, customer_id=C665, env_id=327717, env_name=C665_DKB_DEV_U11, env_type=dev, instance=env-327717laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.945558909Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327717laio1euc1, cloud_platform=AWS, customer_id=C665, env_id=327717, env_name=C665_DKB_DEV_U11, env_type=dev, instance=env-327717laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=preprod" t=2024-05-29T13:44:13.945545269Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.945554251Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.945482513Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.945405761Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocqbt52q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.94538103Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.945497059Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocqbt52q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.94535093Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.945432572Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.945466788Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.94538199Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocqbt52q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945306109Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocqbt52q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945274709Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocm4749n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945201458Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocm4749n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.945029017Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ock1wpl3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944884945Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ock1wpl3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944858005Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocjongka-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944719613Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327668laio1eastus2, cloud_platform=Azure, customer_id=A223, env_id=327668, env_name=A223 Ross Dev, env_type=dev, instance=env-327668laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.945208507Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocjongka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944610042Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ocjongka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944583152Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-occ573w9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944457071Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-occ573w9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.94439449Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.945033548Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327533laio1cac1, cloud_platform=AWS, customer_id=C602, env_id=327533, env_name=C602 Wawanesa Prod, env_type=prod, instance=env-327533laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.945038462Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.94442676Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.944864384Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327524laio1use1, cloud_platform=AWS, customer_id=C545, env_id=327524, env_name=C545 City Austin PROD, env_type=prod, instance=env-327524laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.944911365Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.94480857Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327475laio1euw2, cloud_platform=AWS, customer_id=C548, env_id=327475, env_name=C548_MDU_PROD_U11, env_type=prod, instance=env-327475laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:13.944735561Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.944733587Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.94444792Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327440laio1usw2, cloud_platform=AWS, customer_id=C666, env_id=327440, env_name=C666 Green Dot PROD, env_type=prod, instance=env-327440laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:13.944351962Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-occ573w9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944281239Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.944237137Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:13.944299581Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.944224365Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327425laio1euw3, cloud_platform=AWS, customer_id=C676, env_id=327425, env_name=C676_Guess_EU_Dev_U11, env_type=dev, instance=env-327425laio1euw3, job=integrations/node_exporter, region=eu-west-3, stage=live" t=2024-05-29T13:44:13.944199237Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oca5pem9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944174938Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.943776084Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oca5pem9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944069627Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327423laio1euw3, cloud_platform=AWS, customer_id=C676, env_id=327423, env_name=C676_Guess_EU_Prod_U11, env_type=prod, instance=env-327423laio1euw3, job=integrations/node_exporter, region=eu-west-3, stage=live" t=2024-05-29T13:44:13.94403392Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.943966728Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-obzl1vsw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.944009786Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-obzl1vsw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.943967336Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-obzl1vsw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.943882435Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.943868307Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327335laio1use2, cloud_platform=AWS, customer_id=C510, env_id=327335, env_name=C510_Dev, env_type=dev, instance=env-327335laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.943881875Z level=debug msg="Keeping state" state=Normal
instance="__name__=mstr_status_message_kafka, agent_hostname=env-327335laio1use2, cloud_platform=AWS, customer_id=C510, env_id=327335, env_name=C510_Dev, env_type=dev, instance=env-327335laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.943881875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-obuanfdh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.943815424Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-obnzjedf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.943567912Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327246laio1cac1, cloud_platform=AWS, customer_id=C602, env_id=327246, env_name=C602 Wawanesa DEV, env_type=dev, instance=env-327246laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.943518755Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.943414295Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:13.94342623Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-obnzjedf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.943469851Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-obhb89a3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.943314079Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.943153086Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.943279037Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdbhspzzyx0qrf alerts=1 + logger=ngalert.state.manager.persist user=4947 slug=mediamath 
t=2024-05-29T13:44:13.943195167Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.812376ms + level=debug ts=2024-05-29T13:44:13.943155781Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196413 slug=form3production instance="id=41453, name=form3test-traces, reason=live_traces_exceeded, tenant=form3test-traces" t=2024-05-29T13:44:13.942968219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327037laio1aps2, cloud_platform=AWS, customer_id=C566, env_id=327037, env_name=C566_KFC_Parallel_Prod, env_type=prod, instance=env-327037laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:13.942765851Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-327037laio1aps2, cloud_platform=AWS, customer_id=C566, env_id=327037, env_name=C566_KFC_Parallel_Prod, env_type=prod, instance=env-327037laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:13.942750746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ob63xmvy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.942658852Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=196413 slug=form3production instance="id=217533, name=form3ustest-traces, reason=live_traces_exceeded, tenant=form3ustest-traces" t=2024-05-29T13:44:13.942591459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ob63xmvy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.942529531Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.942415198Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196413 slug=form3production instance="id=199729, name=form3usdevelopment-traces, reason=live_traces_exceeded, tenant=form3usdevelopment-traces" t=2024-05-29T13:44:13.94245136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326993laio1eastus2, cloud_platform=Azure, customer_id=A226, env_id=326993, env_name=A226 Big Lots QA, env_type=qa, instance=env-326993laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.94255653Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.942374322Z level=debug 
msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=24.890614ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oauu06p6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.942326839Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.942332543Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326987laio1cac1, cloud_platform=AWS, customer_id=C513, env_id=326987, env_name=C513 Deschenes DEV, env_type=dev, instance=env-326987laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.942322885Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oauhxaxg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.942219668Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oauhxaxg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.942108287Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=196413 slug=form3production version=1 fingerprint=d12652b2c78bc39f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.941835441Z level=debug msg="Alert rule evaluated" results="[{Instance:id=199729, name=form3usdevelopment-traces, reason=live_traces_exceeded, tenant=form3usdevelopment-traces State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:id=199729, name=form3usdevelopment-traces, reason=live_traces_exceeded, tenant=form3usdevelopment-traces Value:0xc04f38f328} C:{Var:C Labels:id=199729, name=form3usdevelopment-traces, reason=live_traces_exceeded, tenant=form3usdevelopment-traces Value:0xc04f38f2f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.941189077s EvaluationString:[ var='B' labels={id=199729, name=form3usdevelopment-traces, reason=live_traces_exceeded, tenant=form3usdevelopment-traces} value=0 ], [ var='C' labels={id=199729, name=form3usdevelopment-traces, reason=live_traces_exceeded, tenant=form3usdevelopment-traces} value=0 ]} {Instance:id=217533, name=form3ustest-traces, reason=live_traces_exceeded, tenant=form3ustest-traces State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:id=217533, name=form3ustest-traces, reason=live_traces_exceeded, tenant=form3ustest-traces Value:0xc04f38f398} C:{Var:C Labels:id=217533, name=form3ustest-traces, reason=live_traces_exceeded, 
tenant=form3ustest-traces Value:0xc04f38f3c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.941206986s EvaluationString:[ var='B' labels={id=217533, name=form3ustest-traces, reason=live_traces_exceeded, tenant=form3ustest-traces} value=0 ], [ var='C' labels={id=217533, name=form3ustest-traces, reason=live_traces_exceeded, tenant=form3ustest-traces} value=0 ]} {Instance:id=217533, name=form3ustest-traces, reason=rate_limited, tenant=form3ustest-traces State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:id=217533, name=form3ustest-traces, reason=rate_limited, tenant=form3ustest-traces Value:0xc04f38f450} C:{Var:C Labels:id=217533, name=form3ustest-traces, reason=rate_limited, tenant=form3ustest-traces Value:0xc04f38f498}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.941214682s EvaluationString:[ var='B' labels={id=217533, name=form3ustest-traces, reason=rate_limited, tenant=form3ustest-traces} value=0 ], [ var='C' labels={id=217533, name=form3ustest-traces, reason=rate_limited, tenant=form3ustest-traces} value=0 ]} {Instance:id=41449, name=form3development-traces, reason=live_traces_exceeded, tenant=form3development-traces State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:id=41449, name=form3development-traces, reason=live_traces_exceeded, tenant=form3development-traces Value:0xc04f38f518} C:{Var:C Labels:id=41449, name=form3development-traces, reason=live_traces_exceeded, tenant=form3development-traces Value:0xc04f38f550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.941218466s EvaluationString:[ var='B' labels={id=41449, name=form3development-traces, reason=live_traces_exceeded, tenant=form3development-traces} value=0 ], [ var='C' labels={id=41449, name=form3development-traces, reason=live_traces_exceeded, tenant=form3development-traces} value=0 ]} {Instance:id=41453, name=form3test-traces, reason=live_traces_exceeded, tenant=form3test-traces State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:id=41453, name=form3test-traces, reason=live_traces_exceeded, tenant=form3test-traces Value:0xc04f38f650} C:{Var:C Labels:id=41453, name=form3test-traces, reason=live_traces_exceeded, tenant=form3test-traces Value:0xc04f38f610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.941223395s EvaluationString:[ var='B' labels={id=41453, name=form3test-traces, reason=live_traces_exceeded, tenant=form3test-traces} value=0 ], [ var='C' labels={id=41453, name=form3test-traces, reason=live_traces_exceeded, tenant=form3test-traces} value=0 ]} {Instance:id=41454, name=form3production-traces, reason=live_traces_exceeded, tenant=form3production-traces State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:id=41454, name=form3production-traces, reason=live_traces_exceeded, tenant=form3production-traces Value:0xc04f38f6d8} C:{Var:C Labels:id=41454, name=form3production-traces, reason=live_traces_exceeded, tenant=form3production-traces Value:0xc04f38f700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.941228581s EvaluationString:[ var='B' labels={id=41454, name=form3production-traces, reason=live_traces_exceeded, tenant=form3production-traces} value=0 ], [ var='C' labels={id=41454, name=form3production-traces, reason=live_traces_exceeded, tenant=form3production-traces} value=0 ]}]" duration=140.726337ms + logger=ngalert.state.manager.persist user=664976 slug=staging1themomproject t=2024-05-29T13:44:13.942088253Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oassyt37-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.941959825Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oassyt37-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.941893064Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oar44n6z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.941850174Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326866laiocac1, cloud_platform=AWS, customer_id=C761, env_id=326866, env_name=C761 Bell Canada PROD, env_type=prod, instance=env-326866laiocac1, job=integrations/node_exporter, region=ca-central-1, stage=testing" t=2024-05-29T13:44:13.941820313Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326760laiowestus2, cloud_platform=Azure, customer_id=A225, env_id=326760, env_name=A225 Vuori PROD, env_type=prod, instance=env-326760laiowestus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:13.941665082Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326760laiowestus2, cloud_platform=Azure, customer_id=A225, env_id=326760, env_name=A225 Vuori PROD, env_type=prod, instance=env-326760laiowestus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:13.941648463Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oanuzsiq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.941405119Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326717laio1eastus2, cloud_platform=Azure, customer_id=A218, env_id=326717, env_name=A218 GFS POC, env_type=prod, instance=env-326717laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.941435997Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.941343748Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.941225923Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:13.941053483Z level=debug msg="Saving alert states" count=58 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.941038339Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.941031927Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.941176554Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oah5b0aj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.941137127Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.941024266Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.941163345Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oah5b0aj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.941110886Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.941011781Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.941008464Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oah5b0aj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.941079526Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.941000499Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940994624Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940987044Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940964985Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oah5b0aj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940940205Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oaefj122-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940901244Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oaefj122-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940889524Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940929287Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oaefj122-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940839084Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940903752Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940881366Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940860951Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940849027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940816485Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oaefj122-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940799923Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oaefj122-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940760823Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.940748988Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.940713402Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940770519Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oa0vyq1j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940666202Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940725814Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.940689205Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oa0vyq1j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940589441Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.940653269Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-oa0vyq1j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.940572841Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940676752Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940665845Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940660072Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940626893Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.940516438Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940591554Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.940531736Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940575565Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.940434227Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.940492088Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940553879Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940507377Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940489845Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940446374Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326672laio1aps2, cloud_platform=AWS, customer_id=C566, env_id=326672, env_name=c566_kfc_dev u11 parallel, env_type=dev, instance=env-326672laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:13.940328183Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.94031716Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940371696Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.94035547Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.94032647Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940303036Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940293567Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940277236Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940273129Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o9mytiwi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939977435Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940267669Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o9mytiwi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939922184Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940264052Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940259294Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase,
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o9ltvo9f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939880954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o9ltvo9f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939848103Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o9ltvo9f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939804853Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940234854Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o9ltvo9f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939695022Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o96pxju7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939636151Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.940192988Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.939850603Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.939932115Z caller=remote_instance_store.go:51 user=543660 slug=jobcloudprogrammaticstage msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.939938618Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939986202Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939968136Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939958254Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939925927Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939916087Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939882096Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939866266Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939852274Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.939782639Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939816434Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326617laio1usw1, cloud_platform=AWS, customer_id=C418, env_id=326617, env_name=C418 R&F QA, env_type=qa, instance=env-326617laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:13.939789966Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939793296Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939787054Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939780567Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939747898Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939722532Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939704982Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939695343Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939684977Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.939623486Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939659139Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939594335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=wYEY2h04k, ref_id=A" t=2024-05-29T13:44:13.939513777Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o95ayaj2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939318548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326580laio1use1, cloud_platform=AWS, customer_id=C290, env_id=326580, env_name=C290 Enova PROD, env_type=prod, instance=env-326580laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.939208885Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.939181023Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o8t5czf4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939168576Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o8qgxnc6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939069565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o8qgxnc6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.939031235Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o8qgxnc6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.939003165Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.938980277Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.938739062Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o89jsuin-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938891644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=396873 slug=l2beat t=2024-05-29T13:44:13.938983301Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=396873 slug=l2beat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.938958649Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=396873 slug=l2beat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.938937225Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326571laio1use1, cloud_platform=AWS, customer_id=C476, env_id=326571, env_name=C476 DXL DEV, env_type=dev, instance=env-326571laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.93878212Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.938616038Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.938635789Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=334408 slug=voltagrid instance= t=2024-05-29T13:44:13.938635226Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o88ohxsl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938604651Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326569laio1use1, cloud_platform=AWS, 
customer_id=C545, env_id=326569, env_name=C545 City of Austin DEV, env_type=dev, instance=env-326569laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.938593555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=334408 slug=voltagrid version=63 fingerprint=8ad4eb071cb8a324 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.938504364Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.938241045s EvaluationString:}]" duration=32.245496ms + level=debug ts=2024-05-29T13:44:13.938514953Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o88ohxsl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93856661Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o88ohxsl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93853632Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.938498007Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o85l0q3m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938485709Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o85l0q3m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938439809Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326566laio1euw1, cloud_platform=AWS, customer_id=C562, env_id=326566, env_name=C562 TSOL Prod U11, env_type=prod, instance=env-326566laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.938390961Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o85l0q3m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938413559Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o85l0q3m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938342858Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.938313782Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o83r4i4s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938238347Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o83r4i4s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938172446Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o83r4i4s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.938141006Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326565laio1euw1, cloud_platform=AWS, customer_id=C562, env_id=326565, env_name=C562 TSOL Dev U11, env_type=dev, instance=env-326565laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.938204845Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7qfkvrt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937992524Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.938007069Z caller=remote_instance_store.go:51 user=163513 
slug=dialpad msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.937919489Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7qfkvrt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937925384Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.937758837Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7o87elx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937712911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326297laio1use1, cloud_platform=AWS, customer_id=C568, env_id=326297, env_name=C568_TR_DEV, env_type=dev, instance=env-326297laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.937643845Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7o87elx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937641541Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.937635548Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7nb885c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93752271Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7nb885c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937461089Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.93736393Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + 
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7nb885c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937435239Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7kpxhll-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937373778Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.937367222Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.055684ms
+ logger=ngalert.state.manager user=304032 slug=clearbanc instance="clearco_owner=capital-one-devs, clearco_service=shopify-fivetran-service" t=2024-05-29T13:44:13.93735789Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:13.937336716Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.794635ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7kpxhll-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937265487Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.937112399Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.937064262Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326271laio1euc1, cloud_platform=AWS, customer_id=C552, env_id=326271, env_name=C552 Klingel DEV, env_type=dev, instance=env-326271laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.937230309Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o7fxl6hf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.937156906Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.936987375Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o79fgoma-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936913253Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o77ye1oy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936882963Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.936821564Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=122048 slug=skoobe version=1 fingerprint=08e2cb1d486f0a8b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.936674306Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-graphite, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.936121683s EvaluationString:}]" duration=41.280808ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o77ye1oy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936773362Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.936510879Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o77ye1oy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936749932Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o765cejt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936720281Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.936715217Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o765cejt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936670551Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o765cejt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93661137Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=242310 slug=suzy t=2024-05-29T13:44:13.936550065Z level=debug msg="Deleting alert states" count=9
+ level=debug ts=2024-05-29T13:44:13.936514998Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326240laio1use2, cloud_platform=AWS, customer_id=C691, env_id=326240, env_name=C691 CFM Materials PROD, env_type=prod, instance=env-326240laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.93656633Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6w3qx05-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936527279Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.936490558Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=242310 slug=suzy t=2024-05-29T13:44:13.936481526Z level=info msg="Detected stale state entry" cacheID="[[\"S2_Major_Chat\",\"Awake 7 Days\"],[\"S2_Major_Slack\",\"24/7\"],[\"__alert_rule_namespace_uid__\",\"bd519fb1-9aee-45a7-b350-c5be13e50fd4\"],[\"__alert_rule_uid__\",\"bdkszv18iiv40b\"],[\"aggregatedBy\",\"average\"],[\"alertname\",\"Core - Endpoint Duration High - Critical!\"],[\"grafana_folder\",\"Shane's Dashboards\"],[\"name\",\"goose.rest.controller.AddMissionActionStats.durationavg Core_Graphite\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=242310 slug=suzy t=2024-05-29T13:44:13.936473706Z level=info msg="Detected stale state entry" cacheID="[[\"S2_Major_Chat\",\"Awake 7 Days\"],[\"S2_Major_Slack\",\"24/7\"],[\"__alert_rule_namespace_uid__\",\"bd519fb1-9aee-45a7-b350-c5be13e50fd4\"],[\"__alert_rule_uid__\",\"bdkszv18iiv40b\"],[\"aggregatedBy\",\"average\"],[\"alertname\",\"Core - Endpoint Duration High - Critical!\"],[\"grafana_folder\",\"Shane's Dashboards\"],[\"name\",\"thelma.rest.controller.GetUserRewardAsync.durationavg Core_Graphite\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=242310 slug=suzy t=2024-05-29T13:44:13.936462855Z level=info msg="Detected stale state entry" cacheID="[[\"S2_Major_Chat\",\"Awake 7 Days\"],[\"S2_Major_Slack\",\"24/7\"],[\"__alert_rule_namespace_uid__\",\"bd519fb1-9aee-45a7-b350-c5be13e50fd4\"],[\"__alert_rule_uid__\",\"bdkszv18iiv40b\"],[\"aggregatedBy\",\"average\"],[\"alertname\",\"Core - Endpoint Duration High - Critical!\"],[\"grafana_folder\",\"Shane's Dashboards\"],[\"name\",\"goose.rest.controller.CreateMultipleForBrand.durationavg Core_Graphite\"]]" state=Normal reason=
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6w3qx05-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936463419Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6w3qx05-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936430168Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.PrepareUploadAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.936345295Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6ut2gwd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936371478Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=spectrum-cable, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier" t=2024-05-29T13:44:13.936371375Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.ApplyOverrides.durationavg Core_Graphite" t=2024-05-29T13:44:13.936271357Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneStartAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.936238399Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.RegisterAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.936164071Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.RewardSearchAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.936112468Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6ut2gwd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936242046Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetUserRewardsAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.936041066Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetUserRewardsAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.936029598Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier" t=2024-05-29T13:44:13.936161015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6oemfdd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936106275Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetCompositeRewardsAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.935963005Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetCompositeRewardsAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.935951675Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6oemfdd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936061295Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6oemfdd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.936033794Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=371756 slug=asapp version=123 fingerprint=cd80a211026a6ea3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.935897722Z level=debug msg="Alert rule evaluated" results="[{Instance:company_marker=american-airlines, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier Value:0xc0209f3080} C:{Var:C Labels:company_marker=american-airlines, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier Value:0xc0209f30c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.935510789s EvaluationString:[ var='B' labels={company_marker=american-airlines, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier} value=0 ], [ var='C' labels={company_marker=american-airlines, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier} value=0 ]} {Instance:company_marker=spectrum-cable, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=spectrum-cable, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier Value:0xc0209f3138} C:{Var:C Labels:company_marker=spectrum-cable, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier Value:0xc0209f31c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.935526271s EvaluationString:[ var='B' labels={company_marker=spectrum-cable, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier} value=0 ], [ var='C' labels={company_marker=spectrum-cable, request_message_type=NotifyConnectionRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier} value=0 ]} {Instance:company_marker=spectrum-cable, request_message_type=NotifyPongRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=spectrum-cable, request_message_type=NotifyPongRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier Value:0xc0209f3250} C:{Var:C Labels:company_marker=spectrum-cable, request_message_type=NotifyPongRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier Value:0xc0209f3278}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.935533616s EvaluationString:[ var='B' labels={company_marker=spectrum-cable, request_message_type=NotifyPongRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier} value=0 ], [ var='C' labels={company_marker=spectrum-cable, request_message_type=NotifyPongRequest, schema=com.asapp.schemas.product.chat.presence, service=UserPresenceNotifier} value=0 ]}]" duration=62.177097ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6k57es3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.935922853Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6k57es3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.935911733Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326199laio1eastus2, cloud_platform=Azure, customer_id=A223, env_id=326199, env_name=A223 Ross Prod, env_type=prod, instance=env-326199laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.935908168Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326199laio1eastus2, cloud_platform=Azure, customer_id=A223, env_id=326199, env_name=A223 Ross Prod, env_type=prod, instance=env-326199laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.935887554Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6k57es3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.935842442Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetOptinsAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.935841004Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.935852794Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6ilqy91-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.935766882Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetAppConfigAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.9357418Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetAnnouncement_Compat.durationavg Core_Graphite" t=2024-05-29T13:44:13.935705215Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetAnnouncement_Compat.durationavg Core_Graphite" t=2024-05-29T13:44:13.935694072Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetAnnouncementAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.935671454Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6ilqy91-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93565599Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326198laio1euw1, cloud_platform=AWS, customer_id=C610, env_id=326198, env_name=C610_Telefonica_Prod_U11, env_type=prod, instance=env-326198laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.935671641Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.935648752Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GetAvailableChallengesAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.935588557Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.93556408Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.GenerateZendeskTokenAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.935523177Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=thelma.rest.controller.LoginAsync.durationavg Core_Graphite" t=2024-05-29T13:44:13.935476666Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.93544628Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=secondary.rest.controller.AcquireLease.durationavg Core_Graphite" t=2024-05-29T13:44:13.935326445Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.FindForBrand.durationavg Core_Graphite" t=2024-05-29T13:44:13.93526481Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6ce6zb3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.935240876Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.ValidateUpdateStepsFromContext.durationavg Core_Graphite" t=2024-05-29T13:44:13.935231223Z level=debug msg="Keeping state" state=Normal
logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326181laio1use1, cloud_platform=AWS, customer_id=C476, env_id=326181, env_name=C476 DXL PROD, env_type=prod, instance=env-326181laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.935234253Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o6ba51no-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.935114935Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326166laio1use1, cloud_platform=AWS, customer_id=C290, env_id=326166, env_name=C290 Enova DEV, env_type=dev, instance=env-326166laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.935037642Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.Update.durationavg Core_Graphite" t=2024-05-29T13:44:13.93503448Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.9348251Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.DeleteStepsFromContext.durationavg Core_Graphite" t=2024-05-29T13:44:13.934866006Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.AttachAssetsToAnswerOptions.durationavg Core_Graphite" t=2024-05-29T13:44:13.934644245Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.AddAutoAssignActions.durationavg Core_Graphite" t=2024-05-29T13:44:13.934598164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326160laio1use2, cloud_platform=AWS, customer_id=C512, env_id=326160, env_name=C512 BFS PROD, env_type=prod, instance=env-326160laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.934620986Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.934546186Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.93432464Z caller=remote_instance_store.go:51 user=635771 slug=sharedservices msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.ValidateSplitTestingConcepts.durationavg Core_Graphite" t=2024-05-29T13:44:13.934498981Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, 
name=goose.rest.controller.ValidateAndFillResearchTypes.durationavg Core_Graphite" t=2024-05-29T13:44:13.934452038Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=384712 slug=nearinc instance="datasource_uid=7FBWubNVz, ref_id=A,C,D" t=2024-05-29T13:44:13.934472298Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.UpdateTags.durationavg Core_Graphite" t=2024-05-29T13:44:13.93441553Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:13.934240489Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5v3occe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.934351707Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5v3occe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.934234326Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.UpdateMission.durationavg Core_Graphite" t=2024-05-29T13:44:13.93429112Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-326155laio1cac1, cloud_platform=AWS, customer_id=C636, env_id=326155, env_name=C636_PROD_GOEASY_PARALLEL, env_type=prod, instance=env-326155laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.93425969Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.UpdateMission.durationavg Core_Graphite" t=2024-05-29T13:44:13.934260591Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=635771 slug=sharedservices version=2 fingerprint=0be64d8cbcd8af4d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.934056066Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.93359184s EvaluationString:}]" duration=60.142112ms + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.UpdateMaxDiff.durationavg Core_Graphite" t=2024-05-29T13:44:13.934223623Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetMissionTargetingPipingMissions.durationavg 
Core_Graphite" t=2024-05-29T13:44:13.934163452Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetMissionTargetingPipingMissions.durationavg Core_Graphite" t=2024-05-29T13:44:13.934154208Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.CalculatePendingCredits.durationavg Core_Graphite" t=2024-05-29T13:44:13.933960491Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.HydrateAnswers.durationavg Core_Graphite" t=2024-05-29T13:44:13.933910397Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetMissionDetails.durationavg Core_Graphite" t=2024-05-29T13:44:13.933854295Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5s5cagg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.933912102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5pfs9c7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.933830862Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetActionWordCloud.durationavg Core_Graphite" t=2024-05-29T13:44:13.933779953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetActionWordCloud.durationavg Core_Graphite" t=2024-05-29T13:44:13.933771291Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetActionDetail.durationavg Core_Graphite" t=2024-05-29T13:44:13.933732473Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325984laio1eastus2, cloud_platform=Azure, customer_id=A221, env_id=325984, env_name=a221_LV_Prod, env_type=prod, instance=env-325984laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.9338454Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.CalculateGridTopPosition.durationavg Core_Graphite" t=2024-05-29T13:44:13.933706663Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy 
instance="aggregatedBy=average, name=goose.rest.controller.CalculateGridTopPosition.durationavg Core_Graphite" t=2024-05-29T13:44:13.933698149Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetAvailable.durationavg Core_Graphite" t=2024-05-29T13:44:13.933675872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5pfs9c7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.933790701Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetGroupedForMission.durationavg Core_Graphite" t=2024-05-29T13:44:13.93356862Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5pfs9c7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93371876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5pfs9c7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93369853Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5klc1fd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93365449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetGroupedForMission.durationavg Core_Graphite" t=2024-05-29T13:44:13.933560065Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.933516178Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.Find.durationavg Core_Graphite" t=2024-05-29T13:44:13.933505905Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-o5klc1fd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.933490618Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetForBrand.durationavg Core_Graphite" t=2024-05-29T13:44:13.93342994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetForBrand.durationavg Core_Graphite" t=2024-05-29T13:44:13.933420707Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325894laio1euc1, cloud_platform=AWS, customer_id=C587, env_id=325894, env_name=C587_PROD, env_type=prod, instance=env-325894laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.933459683Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.933428768Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetDetail.durationavg Core_Graphite" t=2024-05-29T13:44:13.933327846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.QueueMissionWordDocument.durationavg Core_Graphite" t=2024-05-29T13:44:13.933292495Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=451750 slug=amadeuspfpprod t=2024-05-29T13:44:13.933232717Z level=debug msg="Saving alert states done" count=8 max_state_save_concurrency=1 duration=124.014424ms + level=debug ts=2024-05-29T13:44:13.933266435Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5jalqfw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.933240275Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5jalqfw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.933212675Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5jalqfw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.933139244Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetMissionLastUpdateUTC.durationavg Core_Graphite" t=2024-05-29T13:44:13.932995774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:13.933084794Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325841laio1cac1, cloud_platform=AWS, customer_id=C482, env_id=325841, env_name=C482 AZGA PROD, env_type=prod, instance=env-325841laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.933079216Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:13.933075403Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o5jalqfw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.933095864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325841laio1cac1, cloud_platform=AWS, customer_id=C482, env_id=325841, env_name=C482 AZGA PROD, env_type=prod, instance=env-325841laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.933060903Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.933053935Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.83.87:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ae63a650-1e5c-4644-ae8e-a5f7aac410b0 alerts=1 + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:13.933047894Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=22398 slug=sunfolding t=2024-05-29T13:44:13.932941855Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.309487ms + level=debug ts=2024-05-29T13:44:13.932872602Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=173730 slug=nikon version=4 fingerprint=73816eeb51677cbc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.932943937Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.932556888s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=128.974555ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-o55rhdgu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932929542Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetForMissionPiping.durationavg Core_Graphite" t=2024-05-29T13:44:13.932898902Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o55rhdgu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932834021Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325804laiouse1, cloud_platform=AWS, customer_id=C494, env_id=325804, env_name=C494 CINCH UAT, env_type=qa, instance=env-325804laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.932903687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetById.durationavg Core_Graphite" t=2024-05-29T13:44:13.932830419Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=goose.rest.controller.GetById.durationavg Core_Graphite" t=2024-05-29T13:44:13.932822841Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=21051 slug=mojio t=2024-05-29T13:44:13.932810348Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=21.75584ms + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetUpdateRequired.durationavg Core_Graphite" t=2024-05-29T13:44:13.932766171Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetServerStatus.durationavg Core_Graphite" t=2024-05-29T13:44:13.932731587Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325790laio1use1, cloud_platform=AWS, customer_id=C699, env_id=325790, env_name=C699 Compucom PROD, env_type=prod, instance=env-325790laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.932699058Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o51csq8p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932561738Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, 
name=api.rest.controller.EthnicityFind.durationavg Core_Graphite" t=2024-05-29T13:44:13.932546515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.EthnicityFind.durationavg Core_Graphite" t=2024-05-29T13:44:13.932539293Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.932579114Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:13.93251153Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:13.93247693Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.932460213Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325788laio1use1, cloud_platform=AWS, customer_id=C656, env_id=325788, env_name=C656 HarperCollins PROD, env_type=prod, instance=env-325788laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.932470258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o51csq8p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932485348Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetMissionsActive.durationavg Core_Graphite" t=2024-05-29T13:44:13.932362098Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetMissionsActive.durationavg Core_Graphite" t=2024-05-29T13:44:13.932355343Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.932444948Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetMissionResponseRateInfo.durationavg Core_Graphite" t=2024-05-29T13:44:13.93232076Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetMetaDataQuota.durationavg Core_Graphite" t=2024-05-29T13:44:13.932254137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o4yvfoda-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932328676Z level=debug msg="Keeping 
state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetActionResponseRateInfo.durationavg Core_Graphite" t=2024-05-29T13:44:13.932197294Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview t=2024-05-29T13:44:13.932310928Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.AmazonWebHook.durationavg Core_Graphite" t=2024-05-29T13:44:13.932156296Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=242310 slug=suzy instance="aggregatedBy=average, name=api.rest.controller.GetByBrandId.durationavg Core_Graphite" t=2024-05-29T13:44:13.932077778Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o4yvfoda-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932222385Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o4yvfoda-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932178304Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o4yvfoda-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932148634Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o4mbdh5w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.932103144Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=242310 slug=suzy version=90 fingerprint=12a4e0d8e611db3a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.930930526Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=average, name=api.rest.controller.GetByBrandId.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetByBrandId.durationavg Core_Graphite Value:0xc0152842a8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetByBrandId.durationavg 
Core_Graphite Value:0xc0152842d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928349383s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetByBrandId.durationavg Core_Graphite} value=4.666666666666667 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetByBrandId.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.AmazonWebHook.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.AmazonWebHook.durationavg Core_Graphite Value:0xc015284338} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.AmazonWebHook.durationavg Core_Graphite Value:0xc015284368}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928362745s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.AmazonWebHook.durationavg Core_Graphite} value=26.25 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.AmazonWebHook.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetActionResponseRateInfo.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetActionResponseRateInfo.durationavg Core_Graphite Value:0xc0152843d8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetActionResponseRateInfo.durationavg Core_Graphite Value:0xc015284400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928367653s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetActionResponseRateInfo.durationavg Core_Graphite} value=8.166666666666666 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetActionResponseRateInfo.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetBrandPropertyValues.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetBrandPropertyValues.durationavg Core_Graphite Value:0xc015284468} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetBrandPropertyValues.durationavg Core_Graphite Value:0xc0152844b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928371683s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetBrandPropertyValues.durationavg Core_Graphite} value=8.530112044817926 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetBrandPropertyValues.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetMetaDataQuota.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetMetaDataQuota.durationavg Core_Graphite Value:0xc015284538} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetMetaDataQuota.durationavg Core_Graphite Value:0xc015284610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928375051s EvaluationString:[ var='Mean Reduce' 
labels={aggregatedBy=average, name=api.rest.controller.GetMetaDataQuota.durationavg Core_Graphite} value=29.28 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetMetaDataQuota.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetMissionComposite.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetMissionComposite.durationavg Core_Graphite Value:0xc015284678} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetMissionComposite.durationavg Core_Graphite Value:0xc015284698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928379354s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetMissionComposite.durationavg Core_Graphite} value=66.42450980392157 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetMissionComposite.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetMissionResponseRateInfo.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetMissionResponseRateInfo.durationavg Core_Graphite Value:0xc0152846f8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetMissionResponseRateInfo.durationavg Core_Graphite Value:0xc015284718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928382833s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetMissionResponseRateInfo.durationavg Core_Graphite} value=14.737301587301587 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetMissionResponseRateInfo.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetMissionsActive.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetMissionsActive.durationavg Core_Graphite Value:0xc015284778} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetMissionsActive.durationavg Core_Graphite Value:0xc015284798}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928387011s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetMissionsActive.durationavg Core_Graphite} value=1.3333333333333333 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetMissionsActive.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetUserSnapshotAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetUserSnapshotAsync.durationavg Core_Graphite Value:0xc015284830} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetUserSnapshotAsync.durationavg Core_Graphite Value:0xc0152847e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928390511s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetUserSnapshotAsync.durationavg Core_Graphite} value=246.91770186335404 ], [ 
var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetUserSnapshotAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.VerifyUserTrackPoint.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.VerifyUserTrackPoint.durationavg Core_Graphite Value:0xc015284918} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.VerifyUserTrackPoint.durationavg Core_Graphite Value:0xc015284940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928393663s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.VerifyUserTrackPoint.durationavg Core_Graphite} value=136.25 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.VerifyUserTrackPoint.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.CountryFind.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.CountryFind.durationavg Core_Graphite Value:0xc0152851d8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.CountryFind.durationavg Core_Graphite Value:0xc015285208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928397355s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.CountryFind.durationavg Core_Graphite} value=1.6666666666666667 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.CountryFind.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.EthnicityFind.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.EthnicityFind.durationavg Core_Graphite Value:0xc0152852a0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.EthnicityFind.durationavg Core_Graphite Value:0xc015285248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928400363s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.EthnicityFind.durationavg Core_Graphite} value=1.85 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.EthnicityFind.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.StatesFind.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.StatesFind.durationavg Core_Graphite Value:0xc015285368} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.StatesFind.durationavg Core_Graphite Value:0xc015285300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928404601s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.StatesFind.durationavg Core_Graphite} value=2.892857142857143 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.StatesFind.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetByEmail.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean 
Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetByEmail.durationavg Core_Graphite Value:0xc015285408} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetByEmail.durationavg Core_Graphite Value:0xc015285428}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928407168s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetByEmail.durationavg Core_Graphite} value=84 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetByEmail.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetLocalization.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetLocalization.durationavg Core_Graphite Value:0xc015285c98} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetLocalization.durationavg Core_Graphite Value:0xc015285e08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928410853s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetLocalization.durationavg Core_Graphite} value=0.0984848484848485 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetLocalization.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetLocalizationForConsole.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetLocalizationForConsole.durationavg Core_Graphite Value:0xc015285f18} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetLocalizationForConsole.durationavg Core_Graphite Value:0xc022792010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928414689s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetLocalizationForConsole.durationavg Core_Graphite} value=23.15 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetLocalizationForConsole.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetServerStatus.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetServerStatus.durationavg Core_Graphite Value:0xc022792080} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetServerStatus.durationavg Core_Graphite Value:0xc0227920a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92841928s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetServerStatus.durationavg Core_Graphite} value=1.2217391304347827 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetServerStatus.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetUpdateRequired.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetUpdateRequired.durationavg Core_Graphite Value:0xc022792108} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, 
name=api.rest.controller.GetUpdateRequired.durationavg Core_Graphite Value:0xc022792128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928423411s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetUpdateRequired.durationavg Core_Graphite} value=1.9583333333333333 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetUpdateRequired.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=api.rest.controller.GetActiveOffers.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=api.rest.controller.GetActiveOffers.durationavg Core_Graphite Value:0xc0227921c0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=api.rest.controller.GetActiveOffers.durationavg Core_Graphite Value:0xc022792178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928427127s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=api.rest.controller.GetActiveOffers.durationavg Core_Graphite} value=8 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=api.rest.controller.GetActiveOffers.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetById.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetById.durationavg Core_Graphite Value:0xc022792270} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetById.durationavg Core_Graphite Value:0xc022792298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928431339s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetById.durationavg Core_Graphite} value=9.458333333333336 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetById.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetForMission.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetForMission.durationavg Core_Graphite Value:0xc022792348} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetForMission.durationavg Core_Graphite Value:0xc0227923b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928437165s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetForMission.durationavg Core_Graphite} value=11.4 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetForMission.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetForMissionPiping.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetForMissionPiping.durationavg Core_Graphite Value:0xc022792438} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetForMissionPiping.durationavg Core_Graphite Value:0xc0227923f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928439815s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, 
name=goose.rest.controller.GetForMissionPiping.durationavg Core_Graphite} value=23.666666666666668 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetForMissionPiping.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.FixBadImages.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.FixBadImages.durationavg Core_Graphite Value:0xc022792510} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.FixBadImages.durationavg Core_Graphite Value:0xc022792488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928443252s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.FixBadImages.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.FixBadImages.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetCompletedByAction.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetCompletedByAction.durationavg Core_Graphite Value:0xc022792600} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetCompletedByAction.durationavg Core_Graphite Value:0xc022792628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928445917s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetCompletedByAction.durationavg Core_Graphite} value=106.51750000000001 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetCompletedByAction.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetMissionLastUpdateUTC.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionLastUpdateUTC.durationavg Core_Graphite Value:0xc0227926e0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionLastUpdateUTC.durationavg Core_Graphite Value:0xc022792738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928448938s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetMissionLastUpdateUTC.durationavg Core_Graphite} value=3.5 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetMissionLastUpdateUTC.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.PrepareAssetTranscripts.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.PrepareAssetTranscripts.durationavg Core_Graphite Value:0xc0227927a0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.PrepareAssetTranscripts.durationavg Core_Graphite Value:0xc0227927c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928453101s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.PrepareAssetTranscripts.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' 
labels={aggregatedBy=average, name=goose.rest.controller.PrepareAssetTranscripts.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.QueueMissionWordDocument.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.QueueMissionWordDocument.durationavg Core_Graphite Value:0xc022792858} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.QueueMissionWordDocument.durationavg Core_Graphite Value:0xc022792878}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928456355s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.QueueMissionWordDocument.durationavg Core_Graphite} value=1692 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.QueueMissionWordDocument.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetDetail.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetDetail.durationavg Core_Graphite Value:0xc022792918} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetDetail.durationavg Core_Graphite Value:0xc022792940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92845983s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetDetail.durationavg Core_Graphite} value=70.4236111111111 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetDetail.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetByBrandId.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetByBrandId.durationavg Core_Graphite Value:0xc0227929b0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetByBrandId.durationavg Core_Graphite Value:0xc0227929d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928462682s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetByBrandId.durationavg Core_Graphite} value=9 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetByBrandId.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetActiveByBrand.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetActiveByBrand.durationavg Core_Graphite Value:0xc022792a38} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetActiveByBrand.durationavg Core_Graphite Value:0xc022792a58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928465635s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetActiveByBrand.durationavg Core_Graphite} value=4.333333333333333 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetActiveByBrand.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetForBrand.durationavg Core_Graphite 
State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetForBrand.durationavg Core_Graphite Value:0xc022792ac0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetForBrand.durationavg Core_Graphite Value:0xc022792af8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928470292s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetForBrand.durationavg Core_Graphite} value=61.77499999999999 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetForBrand.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.Create.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.Create.durationavg Core_Graphite Value:0xc022792b88} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.Create.durationavg Core_Graphite Value:0xc022792b40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928472942s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.Create.durationavg Core_Graphite} value=560.3333333333334 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.Create.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.Find.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.Find.durationavg Core_Graphite Value:0xc022792c08} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.Find.durationavg Core_Graphite Value:0xc022792bc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928476282s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.Find.durationavg Core_Graphite} value=8.911277206589705 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.Find.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.EstimateBySizeAndRate.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.EstimateBySizeAndRate.durationavg Core_Graphite Value:0xc022792c68} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.EstimateBySizeAndRate.durationavg Core_Graphite Value:0xc022792c88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928479341s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.EstimateBySizeAndRate.durationavg Core_Graphite} value=2.5 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.EstimateBySizeAndRate.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetGroupedForMission.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetGroupedForMission.durationavg Core_Graphite Value:0xc022792cf8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, 
name=goose.rest.controller.GetGroupedForMission.durationavg Core_Graphite Value:0xc022792d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928482439s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetGroupedForMission.durationavg Core_Graphite} value=4 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetGroupedForMission.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetByCintCountryId.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetByCintCountryId.durationavg Core_Graphite Value:0xc022792d88} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetByCintCountryId.durationavg Core_Graphite Value:0xc022792db0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928486053s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetByCintCountryId.durationavg Core_Graphite} value=21 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetByCintCountryId.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.Download.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.Download.durationavg Core_Graphite Value:0xc022792e20} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.Download.durationavg Core_Graphite Value:0xc022792e48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928490735s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.Download.durationavg Core_Graphite} value=1114 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.Download.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetAvailable.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetAvailable.durationavg Core_Graphite Value:0xc022792ea8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetAvailable.durationavg Core_Graphite Value:0xc022792ed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928493794s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetAvailable.durationavg Core_Graphite} value=30.306944444444444 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetAvailable.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CalculateGridTopPosition.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CalculateGridTopPosition.durationavg Core_Graphite Value:0xc022792f40} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CalculateGridTopPosition.durationavg Core_Graphite Value:0xc022792f68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928496762s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, 
name=goose.rest.controller.CalculateGridTopPosition.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CalculateGridTopPosition.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetActionDetail.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetActionDetail.durationavg Core_Graphite Value:0xc022792fd8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetActionDetail.durationavg Core_Graphite Value:0xc022793000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928499324s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetActionDetail.durationavg Core_Graphite} value=66.67017543859649 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetActionDetail.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetActionWordCloud.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetActionWordCloud.durationavg Core_Graphite Value:0xc022793070} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetActionWordCloud.durationavg Core_Graphite Value:0xc022793098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928501934s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetActionWordCloud.durationavg Core_Graphite} value=28.228431372549018 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetActionWordCloud.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetCrossTab.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetCrossTab.durationavg Core_Graphite Value:0xc022793108} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetCrossTab.durationavg Core_Graphite Value:0xc022793128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92850449s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetCrossTab.durationavg Core_Graphite} value=690 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetCrossTab.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetMissionDetails.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionDetails.durationavg Core_Graphite Value:0xc022793198} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionDetails.durationavg Core_Graphite Value:0xc0227931b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928507064s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetMissionDetails.durationavg Core_Graphite} value=65.44871794871796 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, 
name=goose.rest.controller.GetMissionDetails.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetMonadicRatingSummary.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetMonadicRatingSummary.durationavg Core_Graphite Value:0xc022793228} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetMonadicRatingSummary.durationavg Core_Graphite Value:0xc022793248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92851274s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetMonadicRatingSummary.durationavg Core_Graphite} value=35 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetMonadicRatingSummary.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.HydrateAnswers.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.HydrateAnswers.durationavg Core_Graphite Value:0xc0227932b0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.HydrateAnswers.durationavg Core_Graphite Value:0xc0227932d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928515359s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.HydrateAnswers.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.HydrateAnswers.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CalculatePendingCredits.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CalculatePendingCredits.durationavg Core_Graphite Value:0xc022793358} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CalculatePendingCredits.durationavg Core_Graphite Value:0xc022793318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928517677s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.CalculatePendingCredits.durationavg Core_Graphite} value=51.8 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CalculatePendingCredits.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CopyMission.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CopyMission.durationavg Core_Graphite Value:0xc0227933c0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CopyMission.durationavg Core_Graphite Value:0xc0227933e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928520078s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.CopyMission.durationavg Core_Graphite} value=3144.5 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CopyMission.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetForBrandGlobal.durationavg Core_Graphite 
State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetForBrandGlobal.durationavg Core_Graphite Value:0xc022793448} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetForBrandGlobal.durationavg Core_Graphite Value:0xc022793468}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92852254s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetForBrandGlobal.durationavg Core_Graphite} value=10.084444444444443 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetForBrandGlobal.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetForCrossTab.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetForCrossTab.durationavg Core_Graphite Value:0xc0227934c8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetForCrossTab.durationavg Core_Graphite Value:0xc0227934e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928524839s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetForCrossTab.durationavg Core_Graphite} value=20.25 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetForCrossTab.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetMissionComposite.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionComposite.durationavg Core_Graphite Value:0xc022793578} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionComposite.durationavg Core_Graphite Value:0xc022793530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928527174s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetMissionComposite.durationavg Core_Graphite} value=26.020833333333336 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetMissionComposite.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetMissionTargetingPipingMissions.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionTargetingPipingMissions.durationavg Core_Graphite Value:0xc0227935d8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetMissionTargetingPipingMissions.durationavg Core_Graphite Value:0xc0227935f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928530127s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetMissionTargetingPipingMissions.durationavg Core_Graphite} value=22 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetMissionTargetingPipingMissions.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.PauseMission.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, 
name=goose.rest.controller.PauseMission.durationavg Core_Graphite Value:0xc022793658} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.PauseMission.durationavg Core_Graphite Value:0xc022793678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928532976s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.PauseMission.durationavg Core_Graphite} value=641 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.PauseMission.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateMaxDiff.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateMaxDiff.durationavg Core_Graphite Value:0xc0227936e0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateMaxDiff.durationavg Core_Graphite Value:0xc022793708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928535348s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateMaxDiff.durationavg Core_Graphite} value=1332 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateMaxDiff.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateMission.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateMission.durationavg Core_Graphite Value:0xc022793788} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateMission.durationavg Core_Graphite Value:0xc022793748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928537464s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateMission.durationavg Core_Graphite} value=1235.1666666666667 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateMission.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateSplitTesting.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateSplitTesting.durationavg Core_Graphite Value:0xc0227937e8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateSplitTesting.durationavg Core_Graphite Value:0xc022793808}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928540322s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateSplitTesting.durationavg Core_Graphite} value=1186.75 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateSplitTesting.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateStandAlone.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateStandAlone.durationavg Core_Graphite Value:0xc022793868} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateStandAlone.durationavg Core_Graphite 
Value:0xc022793890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928543112s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateStandAlone.durationavg Core_Graphite} value=1329 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateStandAlone.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateStandAloneShared.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateStandAloneShared.durationavg Core_Graphite Value:0xc0227938f8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateStandAloneShared.durationavg Core_Graphite Value:0xc022793918}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928546277s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateStandAloneShared.durationavg Core_Graphite} value=1329 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateStandAloneShared.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateTags.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateTags.durationavg Core_Graphite Value:0xc022793988} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateTags.durationavg Core_Graphite Value:0xc0227939a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928548642s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateTags.durationavg Core_Graphite} value=761 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateTags.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.ValidateAndFillResearchTypes.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.ValidateAndFillResearchTypes.durationavg Core_Graphite Value:0xc022793a10} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.ValidateAndFillResearchTypes.durationavg Core_Graphite Value:0xc022793a38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928550949s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.ValidateAndFillResearchTypes.durationavg Core_Graphite} value=3.1666666666666665 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.ValidateAndFillResearchTypes.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.ValidateSplitTestingConcepts.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.ValidateSplitTestingConcepts.durationavg Core_Graphite Value:0xc022793a98} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.ValidateSplitTestingConcepts.durationavg Core_Graphite Value:0xc022793ab8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928553537s EvaluationString:[ 
var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.ValidateSplitTestingConcepts.durationavg Core_Graphite} value=22.333333333333332 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.ValidateSplitTestingConcepts.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.ValidateSharedLink.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.ValidateSharedLink.durationavg Core_Graphite Value:0xc022793b28} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.ValidateSharedLink.durationavg Core_Graphite Value:0xc022793b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928556672s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.ValidateSharedLink.durationavg Core_Graphite} value=6 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.ValidateSharedLink.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetByCountry.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetByCountry.durationavg Core_Graphite Value:0xc022793be0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetByCountry.durationavg Core_Graphite Value:0xc022793b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928559297s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetByCountry.durationavg Core_Graphite} value=48.75 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetByCountry.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.AddAutoAssignActions.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.AddAutoAssignActions.durationavg Core_Graphite Value:0xc022793c78} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.AddAutoAssignActions.durationavg Core_Graphite Value:0xc022793c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928561639s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.AddAutoAssignActions.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.AddAutoAssignActions.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.AttachAssetsToAnswerOptions.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.AttachAssetsToAnswerOptions.durationavg Core_Graphite Value:0xc022793cd8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.AttachAssetsToAnswerOptions.durationavg Core_Graphite Value:0xc022793cf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928564233s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.AttachAssetsToAnswerOptions.durationavg Core_Graphite} value=0 ], [ 
var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.AttachAssetsToAnswerOptions.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CleanupCreateStepsFromContext.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CleanupCreateStepsFromContext.durationavg Core_Graphite Value:0xc022793d58} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CleanupCreateStepsFromContext.durationavg Core_Graphite Value:0xc022793d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928567255s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.CleanupCreateStepsFromContext.durationavg Core_Graphite} value=19.333333333333332 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CleanupCreateStepsFromContext.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CleanupUpdateStepsFromContext.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CleanupUpdateStepsFromContext.durationavg Core_Graphite Value:0xc022793dd8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CleanupUpdateStepsFromContext.durationavg Core_Graphite Value:0xc022793df8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928569735s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.CleanupUpdateStepsFromContext.durationavg Core_Graphite} value=180.5 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CleanupUpdateStepsFromContext.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CreateForMission.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CreateForMission.durationavg Core_Graphite Value:0xc022793e60} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CreateForMission.durationavg Core_Graphite Value:0xc022793e88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928572637s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.CreateForMission.durationavg Core_Graphite} value=1575.6666666666667 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CreateForMission.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CreateImageAnswerOptionsAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CreateImageAnswerOptionsAsync.durationavg Core_Graphite Value:0xc022793ee8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CreateImageAnswerOptionsAsync.durationavg Core_Graphite Value:0xc022793f10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928575635s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.CreateImageAnswerOptionsAsync.durationavg Core_Graphite} 
value=0.16666666666666666 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CreateImageAnswerOptionsAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.CreateStepsFromContext.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.CreateStepsFromContext.durationavg Core_Graphite Value:0xc022793f80} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.CreateStepsFromContext.durationavg Core_Graphite Value:0xc022793fa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928579293s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.CreateStepsFromContext.durationavg Core_Graphite} value=734.8333333333334 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.CreateStepsFromContext.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.DeleteStepsFromContext.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.DeleteStepsFromContext.durationavg Core_Graphite Value:0xc00cdf6008} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.DeleteStepsFromContext.durationavg Core_Graphite Value:0xc00cdf6028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92858235s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.DeleteStepsFromContext.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.DeleteStepsFromContext.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetPrimaryForMission.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetPrimaryForMission.durationavg Core_Graphite Value:0xc00cdf60b0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetPrimaryForMission.durationavg Core_Graphite Value:0xc00cdf6068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928584636s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetPrimaryForMission.durationavg Core_Graphite} value=20.63157894736842 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.GetPrimaryForMission.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.GetValidStepKind.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.GetValidStepKind.durationavg Core_Graphite Value:0xc00cdf6120} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.GetValidStepKind.durationavg Core_Graphite Value:0xc00cdf6148}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928586898s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.GetValidStepKind.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, 
name=goose.rest.controller.GetValidStepKind.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.PatchMaxDiffStep.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.PatchMaxDiffStep.durationavg Core_Graphite Value:0xc00cdf61c8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.PatchMaxDiffStep.durationavg Core_Graphite Value:0xc00cdf6188}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928589282s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.PatchMaxDiffStep.durationavg Core_Graphite} value=37 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.PatchMaxDiffStep.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.Update.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.Update.durationavg Core_Graphite Value:0xc00cdf6298} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.Update.durationavg Core_Graphite Value:0xc00cdf6208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928591914s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.Update.durationavg Core_Graphite} value=2227.3333333333335 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.Update.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateActionsToMatchFirstAction.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateActionsToMatchFirstAction.durationavg Core_Graphite Value:0xc00cdf6310} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateActionsToMatchFirstAction.durationavg Core_Graphite Value:0xc00cdf6338}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928594503s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateActionsToMatchFirstAction.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateActionsToMatchFirstAction.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.UpdateAutoAssignActions.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateAutoAssignActions.durationavg Core_Graphite Value:0xc00cdf63a8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateAutoAssignActions.durationavg Core_Graphite Value:0xc00cdf63d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928596648s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateAutoAssignActions.durationavg Core_Graphite} value=4 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateAutoAssignActions.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, 
name=goose.rest.controller.UpdateStepsFromContext.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.UpdateStepsFromContext.durationavg Core_Graphite Value:0xc00cdf6438} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.UpdateStepsFromContext.durationavg Core_Graphite Value:0xc00cdf6458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928604619s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.UpdateStepsFromContext.durationavg Core_Graphite} value=1849 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.UpdateStepsFromContext.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.ValidateCreateStepsFromContext.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.ValidateCreateStepsFromContext.durationavg Core_Graphite Value:0xc00cdf64b8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.ValidateCreateStepsFromContext.durationavg Core_Graphite Value:0xc00cdf64d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92860748s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.ValidateCreateStepsFromContext.durationavg Core_Graphite} value=10 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.ValidateCreateStepsFromContext.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.ValidateUpdateStepsFromContext.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.ValidateUpdateStepsFromContext.durationavg Core_Graphite Value:0xc00cdf6548} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.ValidateUpdateStepsFromContext.durationavg Core_Graphite Value:0xc00cdf6570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928611027s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.ValidateUpdateStepsFromContext.durationavg Core_Graphite} value=128.33333333333334 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.ValidateUpdateStepsFromContext.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.FindForBrand.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.FindForBrand.durationavg Core_Graphite Value:0xc00cdf6610} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.FindForBrand.durationavg Core_Graphite Value:0xc00cdf6638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928614841s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.FindForBrand.durationavg Core_Graphite} value=14 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.FindForBrand.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=goose.rest.controller.FindForBrandByTemplateKind.durationavg 
Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=goose.rest.controller.FindForBrandByTemplateKind.durationavg Core_Graphite Value:0xc00cdf66c0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=goose.rest.controller.FindForBrandByTemplateKind.durationavg Core_Graphite Value:0xc00cdf6678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928619083s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=goose.rest.controller.FindForBrandByTemplateKind.durationavg Core_Graphite} value=9 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=goose.rest.controller.FindForBrandByTemplateKind.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=secondary.rest.controller.AcquireLease.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=secondary.rest.controller.AcquireLease.durationavg Core_Graphite Value:0xc00cdf6748} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=secondary.rest.controller.AcquireLease.durationavg Core_Graphite Value:0xc00cdf6708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928621869s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=secondary.rest.controller.AcquireLease.durationavg Core_Graphite} value=0.4444444444444444 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=secondary.rest.controller.AcquireLease.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=secondary.rest.controller.GetActiveMissions.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=secondary.rest.controller.GetActiveMissions.durationavg Core_Graphite Value:0xc00cdf67c8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=secondary.rest.controller.GetActiveMissions.durationavg Core_Graphite Value:0xc00cdf67f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928624581s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=secondary.rest.controller.GetActiveMissions.durationavg Core_Graphite} value=0 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=secondary.rest.controller.GetActiveMissions.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.AppleLoginAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.AppleLoginAsync.durationavg Core_Graphite Value:0xc00cdf6860} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.AppleLoginAsync.durationavg Core_Graphite Value:0xc00cdf6888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928627453s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.AppleLoginAsync.durationavg Core_Graphite} value=80 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.AppleLoginAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.LoginAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, 
name=thelma.rest.controller.LoginAsync.durationavg Core_Graphite Value:0xc00cdf68e8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.LoginAsync.durationavg Core_Graphite Value:0xc00cdf6908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928629907s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.LoginAsync.durationavg Core_Graphite} value=64.66666666666667 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.LoginAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GenerateZendeskTokenAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GenerateZendeskTokenAsync.durationavg Core_Graphite Value:0xc00cdf6978} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GenerateZendeskTokenAsync.durationavg Core_Graphite Value:0xc00cdf6998}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928632457s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GenerateZendeskTokenAsync.durationavg Core_Graphite} value=4.00952380952381 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GenerateZendeskTokenAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.SelfAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.SelfAsync.durationavg Core_Graphite Value:0xc00cdf69f8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.SelfAsync.durationavg Core_Graphite Value:0xc00cdf6a18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928638465s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.SelfAsync.durationavg Core_Graphite} value=24.052691511387163 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.SelfAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetAvailableChallengesAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetAvailableChallengesAsync.durationavg Core_Graphite Value:0xc00cdf6aa8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetAvailableChallengesAsync.durationavg Core_Graphite Value:0xc00cdf6ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928642219s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetAvailableChallengesAsync.durationavg Core_Graphite} value=242.49756728778476 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetAvailableChallengesAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetUserTrackChallengeAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetUserTrackChallengeAsync.durationavg Core_Graphite Value:0xc00cdf6b68} 
Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetUserTrackChallengeAsync.durationavg Core_Graphite Value:0xc00cdf6b18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928645501s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetUserTrackChallengeAsync.durationavg Core_Graphite} value=510.3636363636364 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetUserTrackChallengeAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetAnnouncementAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetAnnouncementAsync.durationavg Core_Graphite Value:0xc00cdf6be8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetAnnouncementAsync.durationavg Core_Graphite Value:0xc00cdf6c08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928648657s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetAnnouncementAsync.durationavg Core_Graphite} value=11.820082815734988 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetAnnouncementAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetAnnouncement_Compat.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetAnnouncement_Compat.durationavg Core_Graphite Value:0xc00cdf6c68} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetAnnouncement_Compat.durationavg Core_Graphite Value:0xc00cdf6c88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928651685s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetAnnouncement_Compat.durationavg Core_Graphite} value=11.849068322981365 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetAnnouncement_Compat.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetAppConfigAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetAppConfigAsync.durationavg Core_Graphite Value:0xc00cdf6cf8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetAppConfigAsync.durationavg Core_Graphite Value:0xc00cdf6d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928654704s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetAppConfigAsync.durationavg Core_Graphite} value=3.1363636363636362 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetAppConfigAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetLegalAvailableAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetLegalAvailableAsync.durationavg Core_Graphite Value:0xc00cdf6e28} Millisecond Threshold:{Var:Millisecond Threshold 
Labels:aggregatedBy=average, name=thelma.rest.controller.GetLegalAvailableAsync.durationavg Core_Graphite Value:0xc00cdf6da8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928657425s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetLegalAvailableAsync.durationavg Core_Graphite} value=42.375 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetLegalAvailableAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetLegalNoticeAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetLegalNoticeAsync.durationavg Core_Graphite Value:0xc00cdf6f28} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetLegalNoticeAsync.durationavg Core_Graphite Value:0xc00cdf6ea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928660817s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetLegalNoticeAsync.durationavg Core_Graphite} value=70.68695652173913 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetLegalNoticeAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetOptinsAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetOptinsAsync.durationavg Core_Graphite Value:0xc00cdf6fd8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetOptinsAsync.durationavg Core_Graphite Value:0xc00cdf7000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928663191s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetOptinsAsync.durationavg Core_Graphite} value=25.73076923076923 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetOptinsAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetWebConfigAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetWebConfigAsync.durationavg Core_Graphite Value:0xc00cdf7090} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetWebConfigAsync.durationavg Core_Graphite Value:0xc00cdf70b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928665601s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetWebConfigAsync.durationavg Core_Graphite} value=8.947101449275362 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetWebConfigAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.OptInAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.OptInAsync.durationavg Core_Graphite Value:0xc00cdf7148} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.OptInAsync.durationavg Core_Graphite Value:0xc00cdf7108}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:3.928668459s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.OptInAsync.durationavg Core_Graphite} value=540.6666666666666 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.OptInAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetCompositeRewardsAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetCompositeRewardsAsync.durationavg Core_Graphite Value:0xc00cdf71b0} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetCompositeRewardsAsync.durationavg Core_Graphite Value:0xc00cdf71d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928670767s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetCompositeRewardsAsync.durationavg Core_Graphite} value=10.893064182194616 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetCompositeRewardsAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetGoalForUserAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetGoalForUserAsync.durationavg Core_Graphite Value:0xc00cdf7238} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetGoalForUserAsync.durationavg Core_Graphite Value:0xc00cdf7258}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928673515s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetGoalForUserAsync.durationavg Core_Graphite} value=56.067857142857136 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetGoalForUserAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetUserRewardsAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetUserRewardsAsync.durationavg Core_Graphite Value:0xc00cdf72c8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetUserRewardsAsync.durationavg Core_Graphite Value:0xc00cdf72f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928675735s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetUserRewardsAsync.durationavg Core_Graphite} value=19.279968944099377 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetUserRewardsAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.RequestRewardAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.RequestRewardAsync.durationavg Core_Graphite Value:0xc00cdf7358} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.RequestRewardAsync.durationavg Core_Graphite Value:0xc00cdf7378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928678217s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.RequestRewardAsync.durationavg Core_Graphite} value=2788.875 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.RequestRewardAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.RewardSearchAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.RewardSearchAsync.durationavg Core_Graphite Value:0xc00cdf73e8} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.RewardSearchAsync.durationavg Core_Graphite Value:0xc00cdf7408}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928681759s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.RewardSearchAsync.durationavg Core_Graphite} value=32.3308143547274 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.RewardSearchAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.RegisterAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.RegisterAsync.durationavg Core_Graphite Value:0xc00cdf7478} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.RegisterAsync.durationavg Core_Graphite Value:0xc00cdf74a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928684277s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.RegisterAsync.durationavg Core_Graphite} value=1021.8 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.RegisterAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneCompleteAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneCompleteAsync.durationavg Core_Graphite Value:0xc00cdf7508} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneCompleteAsync.durationavg Core_Graphite Value:0xc00cdf7528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928686658s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneCompleteAsync.durationavg Core_Graphite} value=312.2857142857143 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneCompleteAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneStartAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneStartAsync.durationavg Core_Graphite Value:0xc00cdf7588} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneStartAsync.durationavg Core_Graphite Value:0xc00cdf75a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928689757s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneStartAsync.durationavg Core_Graphite} value=1827.642857142857 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.VerifyPhoneStartAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.ApplyOverrides.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.ApplyOverrides.durationavg Core_Graphite Value:0xc00cdf7630} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.ApplyOverrides.durationavg Core_Graphite Value:0xc00cdf75e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928692141s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.ApplyOverrides.durationavg Core_Graphite} value=6.935196687370599 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.ApplyOverrides.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.GetForPerformingAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.GetForPerformingAsync.durationavg Core_Graphite Value:0xc00cdf7698} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.GetForPerformingAsync.durationavg Core_Graphite Value:0xc00cdf76b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928694316s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.GetForPerformingAsync.durationavg Core_Graphite} value=1072.7159863945578 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.GetForPerformingAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.PrepareUploadAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.PrepareUploadAsync.durationavg Core_Graphite Value:0xc00cdf7738} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.PrepareUploadAsync.durationavg Core_Graphite Value:0xc00cdf76f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928696577s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.PrepareUploadAsync.durationavg Core_Graphite} value=6 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.PrepareUploadAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.SkipResponseAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.SkipResponseAsync.durationavg Core_Graphite Value:0xc00cdf7798} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.SkipResponseAsync.durationavg Core_Graphite Value:0xc00cdf77b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928698692s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.SkipResponseAsync.durationavg Core_Graphite} value=303.9166666666667 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.SkipResponseAsync.durationavg Core_Graphite} value=0 ]} {Instance:aggregatedBy=average, name=thelma.rest.controller.SubmitResponseAsync.durationavg Core_Graphite State:Normal Error: Results:map[] Values:map[Mean Reduce:{Var:Mean Reduce Labels:aggregatedBy=average, name=thelma.rest.controller.SubmitResponseAsync.durationavg Core_Graphite Value:0xc00cdf7828} Millisecond Threshold:{Var:Millisecond Threshold Labels:aggregatedBy=average, name=thelma.rest.controller.SubmitResponseAsync.durationavg Core_Graphite Value:0xc00cdf7850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928700861s EvaluationString:[ var='Mean Reduce' labels={aggregatedBy=average, name=thelma.rest.controller.SubmitResponseAsync.durationavg Core_Graphite} value=2197.6707902001376 ], [ var='Millisecond Threshold' labels={aggregatedBy=average, name=thelma.rest.controller.SubmitResponseAsync.durationavg Core_Graphite} value=0 ]}]" duration=73.334379ms
+ level=debug ts=2024-05-29T13:44:13.93202059Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325775laio1use2, cloud_platform=AWS, customer_id=C512, env_id=325775, env_name=C512 BFS QA, env_type=qa, instance=env-325775laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.931991826Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.931937774Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.099833ms
+ logger=ngalert.state.manager.persist user=320778 slug=omegaai t=2024-05-29T13:44:13.931878124Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.243943ms
+ level=debug ts=2024-05-29T13:44:13.931805384Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o4daiyja-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93177501Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325772laio1euw1, cloud_platform=AWS, customer_id=C608, env_id=325772, env_name=C608_IPSEN_Prod_2021U11, env_type=prod, instance=env-325772laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.931780808Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.931671148Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.931613023Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.931596596Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325770laio1use1, cloud_platform=AWS, customer_id=C672, env_id=325770, env_name=C672 NJIT DEV, env_type=dev, instance=env-325770laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.931573889Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o43xzmrv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.931576178Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o43xzmrv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.931544288Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o43xzmrv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.931471797Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.931402334Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.931344482Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.931290452Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=891649532224e9b6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.931291986Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.931036527s EvaluationString:}]" duration=198.6589ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o422rwp6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.931181264Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.931115001Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o422rwp6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.931141094Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o422rwp6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.931066483Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.93103841Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.931026322Z caller=remote_instance_store.go:51 user=151289 slug=everflow msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3v9c4zh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930914291Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3v9c4zh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930881041Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3v9c4zh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.93081134Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.930783067Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:13.930774723Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.930744214Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=63699 slug=bizzydist t=2024-05-29T13:44:13.930673379Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.4399ms
+ logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=1b346fd211d85116 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.930688284Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.930439314s EvaluationString:}]" duration=34.094423ms
+ logger=ngalert.state.manager.persist user=151289 slug=everflow t=2024-05-29T13:44:13.930611613Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=72.890467ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3u24v3b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930630728Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3u24v3b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930589738Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.93052721Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=438185 slug=nodeinfra instance="chain=SUI, instance=57.128.98.9:8001, network=mainnet, region=france, servicetype=fullnode" t=2024-05-29T13:44:13.930560236Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.930540029Z caller=remote_instance_store.go:51 user=815134 slug=tauspace msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3u24v3b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930490967Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=815134 slug=tauspace instance= t=2024-05-29T13:44:13.930479217Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=664976 slug=staging1themomproject instance="datasource_uid=grafanacloud-prom, ref_id=query" t=2024-05-29T13:44:13.930519062Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=664976 slug=staging1themomproject instance="datasource_uid=grafanacloud-prom, ref_id=query" t=2024-05-29T13:44:13.930503643Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=815134 slug=tauspace t=2024-05-29T13:44:13.930419606Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=664976 slug=staging1themomproject version=14 fingerprint=53f5109419e99f1f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.930413873Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=query State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.929940389s EvaluationString:}]" duration=18.464902ms
+ logger=ngalert.scheduler user=815134 slug=tauspace version=4 fingerprint=c3db65fb858170ab attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.930343605Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Current Count :{Var:Current Count Labels: Value:0xc0b1f3dd18} Threshold:{Var:Threshold Labels: Value:0xc0b1f3dd60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.929979433s EvaluationString:[ var='Current Count ' labels={} value=2538 ], [ var='Threshold' labels={} value=0 ]}]" duration=396.189964ms
+ logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:13.930332905Z level=debug msg="State manager processing evaluation results" resultCount=2
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3tgzznb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930268545Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3r2bv2b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930155854Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3r2bv2b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930084783Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3r2bv2b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.930055853Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.929005921Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.929791784Z caller=remote_instance_store.go:51 user=169420 slug=newspring msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325514laio1westeurope, cloud_platform=Azure, customer_id=A217, env_id=325514, env_name=A217-NAV-DEV, env_type=dev, instance=env-325514laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.929741657Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=169420 slug=newspring instance="instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc" t=2024-05-29T13:44:13.929721863Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.929717633Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=169420 slug=newspring instance="instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc" t=2024-05-29T13:44:13.929628112Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.929608302Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=169420 slug=newspring instance="instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc" t=2024-05-29T13:44:13.929529599Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=169420 slug=newspring version=112 fingerprint=7851c1a6c520fa87 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.929228707Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc Value:0xc013101828} B:{Var:B Labels:instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc Value:0xc013101880} COL:{Var:COL Labels:instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc Value:0xc0131018d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928625593s EvaluationString:[ var='A' labels={instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc} value=118 ], [ var='B' labels={instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc} value=0 ], [ var='COL' labels={instance=10.5.60.50, job=Eaton UPS 1Gbps, key=COL, sysName=col-mdf-ups-01.ad.newspring.cc} value=118 ]} {Instance:instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc Value:0xc013101a30} B:{Var:B Labels:instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc Value:0xc013101970} COL:{Var:COL Labels:instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc Value:0xc0131019e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928643145s EvaluationString:[ var='A' labels={instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc} value=118 ], [ var='B' labels={instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc} value=0 ], [ var='COL' labels={instance=10.5.60.51, job=Eaton UPS 1Gbps, key=COL, sysName=col-idf-ups-01.ad.newspring.cc} value=118 ]} {Instance:instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc Value:0xc013101c48} B:{Var:B Labels:instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc Value:0xc013101c88} COL:{Var:COL Labels:instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc Value:0xc013101ce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.928651696s EvaluationString:[ var='A' labels={instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc} value=122 ], [ var='B' labels={instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc} value=0 ], [ var='COL' labels={instance=10.5.60.52, job=Eaton UPS 1Gbps, key=COL, sysName=ups-00-20-85-D4-46-4C.ad.newspring.cc} value=122 ]}]" duration=136.303796ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325491laio1use1, cloud_platform=AWS, customer_id=C459, env_id=325491, env_name=C459_AZ_US_UAT, env_type=qa, instance=env-325491laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.929265101Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.929272519Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325491laio1use1, cloud_platform=AWS, customer_id=C459, env_id=325491, env_name=C459_AZ_US_UAT, env_type=qa, instance=env-325491laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.929246795Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.929145842Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:13.929146724Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3ebwzz6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.929185774Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:13.929125393Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3c7sc20-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.929111943Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3c7sc20-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.929095263Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3c7sc20-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.929054462Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.929019528Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.9290332Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.928975975Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.928982611Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3c7sc20-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928951161Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.928856393Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.928857386Z caller=remote_alert_sender.go:94 user=55491 slug=demandbase host=demandbase-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.145.226:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e72354bb-84f7-47a5-b2b1-1c19313d9dd1 alerts=1
+ level=debug ts=2024-05-29T13:44:13.92881242Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=55491 slug=demandbase t=2024-05-29T13:44:13.928749333Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=48.441342ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o3a360bp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928756449Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.928788049Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o37uswrk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928701489Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.928759519Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.92875709Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325389laio1use1, cloud_platform=AWS, customer_id=C348, env_id=325389, env_name=C348 CWHH PROD, env_type=prod, instance=env-325389laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.928680013Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:13.928538031Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=8b5a8dfd9eee4d2c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.928495513Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=9.485599ms
+ level=debug ts=2024-05-29T13:44:13.928553086Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=error ts=2024-05-29T13:44:13.928452166Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o35m4l2q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928448456Z level=debug msg="Keeping state" state=Normal
+ level=debug component=discovery ts=2024-05-29T13:44:13.928443081Z caller=retry.go:58 user=398905 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=2
+ level=debug ts=2024-05-29T13:44:13.928397332Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o35m4l2q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928417916Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o35m4l2q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928375855Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o35m4l2q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928262144Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.928386973Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o33wvhj6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928142643Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o33wvhj6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.928064022Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o2lj2ty7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927968351Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.928430975Z caller=remote_instance_store.go:51 user=212546 slug=modica msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=212546 slug=modica instance="datasource_uid=grafanacloud-graphite, ref_id=A" t=2024-05-29T13:44:13.928393023Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=212546 slug=modica t=2024-05-29T13:44:13.928365941Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o2bn3jyk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927753449Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o2bn3jyk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927686118Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o28j599i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927580067Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o28j599i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927564587Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o28j599i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927495186Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o256pc5u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927445856Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o256pc5u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927336365Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o23rr5ob-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927170503Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o23rr5ob-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927039832Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o23rr5ob-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.927018481Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.928074284Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325351laio1use1, cloud_platform=AWS, customer_id=C633, env_id=325351, env_name=C633 Disney PRD U11, env_type=prod, instance=env-325351laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.9281245Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o1walogi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.926313374Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o1qlr2si-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.925982841Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o1qlr2si-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.925847669Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o1qlr2si-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.925784039Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325350laio1euw1, cloud_platform=AWS, customer_id=C634, env_id=325350, env_name=C634_KFC_GB_PARALLEL_PROD, env_type=prod, instance=env-325350laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.92796426Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325350laio1euw1, cloud_platform=AWS, customer_id=C634, env_id=325350, env_name=C634_KFC_GB_PARALLEL_PROD, env_type=prod, instance=env-325350laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.927940529Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o1d891ol-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.925379905Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o16blhl5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.925123142Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o16blhl5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92495458Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325297laio1use1, cloud_platform=AWS, customer_id=C646, env_id=325297, env_name=C646 iconectiv Prod, env_type=prod, instance=env-325297laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.927762649Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.927724102Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=ny4ap-uoms-02" t=2024-05-29T13:44:13.927679016Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=ny4ap-uoms-01" t=2024-05-29T13:44:13.927647325Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0puj3a9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.924767808Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.927577303Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0puj3a9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.924688507Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0nv657m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.924552926Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.927523162Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0i5ak6s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.924322934Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0i5ak6s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.924292413Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=538037 slug=drivewealth version=162 fingerprint=472ec383781bdd98 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.927506604Z level=debug msg="Alert rule evaluated" results="[{Instance:host=ny4ap-uoms-01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=ny4ap-uoms-01 Value:0xc01b0b0ff0} B:{Var:B Labels:host=ny4ap-uoms-01 Value:0xc01b0b1010} C:{Var:C Labels:host=ny4ap-uoms-01 Value:0xc01b0b1040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.927078385s EvaluationString:[ var='A' labels={host=ny4ap-uoms-01} value=3358.5584697869876 ], [ var='B' labels={host=ny4ap-uoms-01} value=3358.5584697869876 ], [ var='C' labels={host=ny4ap-uoms-01} value=0 ]} {Instance:host=ny4ap-uoms-02 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=ny4ap-uoms-02 Value:0xc01b0b1090} B:{Var:B Labels:host=ny4ap-uoms-02 Value:0xc01b0b10e0} C:{Var:C Labels:host=ny4ap-uoms-02 Value:0xc01b0b1100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.927093718s EvaluationString:[ var='A' labels={host=ny4ap-uoms-02} value=301.9502096241606 ], [ var='B' labels={host=ny4ap-uoms-02} value=301.9502096241606 ], [ var='C' labels={host=ny4ap-uoms-02} value=0 ]} {Instance:host=ny4ap-uoms-03 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=ny4ap-uoms-03 Value:0xc01b0b1170} B:{Var:B Labels:host=ny4ap-uoms-03 Value:0xc01b0b11b0} C:{Var:C Labels:host=ny4ap-uoms-03 Value:0xc01b0b1200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.927100641s EvaluationString:[ var='A' labels={host=ny4ap-uoms-03} value=303.10493711282146 ], [ var='B' labels={host=ny4ap-uoms-03} value=303.10493711282146 ], [ var='C' labels={host=ny4ap-uoms-03} value=0 ]}]" duration=26.543739ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0hdkxs4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.924135352Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0hdkxs4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.924055031Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o0hdkxs4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92398735Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o07ulfn0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92391808Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o07ulfn0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.923671437Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-o07ulfn0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.923628847Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzxxu4xq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.923513265Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzxxu4xq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.923367484Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzwyuxyd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.923323984Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzwyuxyd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.923184512Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzscaacl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.923064081Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzscaacl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92302286Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.927360457Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325219laio1use1, cloud_platform=AWS, customer_id=C363, env_id=325219, env_name=C363_Lincoln_PRD_2021U11, env_type=prod, instance=env-325219laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.927385284Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzrrjyl8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.922902389Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.927275333Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.927269179Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.927254033Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.927247914Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzniucwe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.922419914Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.92722617Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzniucwe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.922344823Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzltahuy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.922292373Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.927190573Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.927145513Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzlt0uki-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.922141371Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzlt0uki-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.922123941Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzlt0uki-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.922082151Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=231576 slug=om2phoenixpoc instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.927131298Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzlt0uki-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92202769Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzlt0uki-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92201405Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325189laio1use1, cloud_platform=AWS, customer_id=C437, env_id=325189, env_name=C437_DPSG_QA, env_type=qa, instance=env-325189laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.927176329Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager
user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325189laio1use1, cloud_platform=AWS, customer_id=C437, env_id=325189, env_name=C437_DPSG_QA, env_type=qa, instance=env-325189laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.92716084Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzkg9xzv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921916929Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzkg9xzv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921904089Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzhqsq8x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921806368Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzf2535u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921631076Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzf2535u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921557805Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzf2535u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921507985Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nzf2535u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921482585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325112laio1use1, cloud_platform=AWS, customer_id=C724, env_id=325112, env_name=C724 Travelers CL Prd, env_type=prod, instance=env-325112laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.926995063Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz63wwjk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921374424Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz4xkg39-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921281393Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz4xkg39-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921267112Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz4xkg39-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921226622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz4xkg39-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921174161Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-nz3ug8se-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.921106931Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz3ug8se-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92103112Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz3ug8se-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92098772Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.926818433Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz1fy0rd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.920876338Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz1fy0rd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.920822378Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.92685298Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nz1fy0rd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.920802868Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.926803058Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325052laio1westeurope, cloud_platform=Azure, customer_id=A217, env_id=325052, env_name=a217-NAV-PROD, env_type=prod, instance=env-325052laio1westeurope, 
job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.926676467Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-325052laio1westeurope, cloud_platform=Azure, customer_id=A217, env_id=325052, env_name=a217-NAV-PROD, env_type=prod, instance=env-325052laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.926658Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.926384259Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.926373449Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.926214981Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.926265997Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.926364962Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.92621014Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324990laio1use1, cloud_platform=AWS, customer_id=C363, env_id=324990, env_name=C363_Lincoln_Dev_U11, env_type=dev, instance=env-324990laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.926272946Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.926299036Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.926134Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.926156104Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=189b9dcc9857ea6e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.926140301Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.92587902s EvaluationString:}]" duration=10.625362ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324978laio1euw1, cloud_platform=AWS, customer_id=C394, env_id=324978, env_name=C394_AZ_EU_UAT, env_type=qa, instance=env-324978laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.9261043Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324978laio1euw1, cloud_platform=AWS, customer_id=C394, env_id=324978, env_name=C394_AZ_EU_UAT, env_type=qa, instance=env-324978laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.926089046Z level=debug msg="Setting next state" handler=resultNormal + 
level=debug ts=2024-05-29T13:44:13.925785703Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.925802561Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.925709084Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324835laio1euw1, cloud_platform=AWS, customer_id=C394, env_id=324835, env_name=C394_AZ_EU_Dev, env_type=dev, instance=env-324835laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.925210703Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.924913675Z caller=remote_instance_store.go:51 user=686395 slug=containerfoundation msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324823laio1use1, cloud_platform=AWS, customer_id=C631, env_id=324823, env_name=C631 The Bay PROD, env_type=prod, instance=env-324823laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.924776798Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.924172957Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.924228035Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.924074607Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.924058651Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.924018062Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.923999765Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.924049496Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.924022282Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=471861 slug=planetstaging version=2 fingerprint=603d42a4c4fc0025 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.923955487Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.923700407s EvaluationString:}]" duration=19.500773ms + logger=ngalert.scheduler user=543660 slug=jobcloudprogrammaticstage version=1 fingerprint=22ab46368e69f231 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.923629835Z level=debug msg="Alert rule evaluated" 
results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.923137387s EvaluationString:}]" duration=69.89613ms + level=debug ts=2024-05-29T13:44:13.923604199Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.923566137Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.923559201Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.923434823Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.923138266Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:13.923056544Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324712laio1use1, cloud_platform=AWS, customer_id=C680, env_id=324712, env_name=C680 FIS DEV U11, env_type=dev, instance=env-324712laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.923021914Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.922896739Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.922951036Z caller=remote_instance_store.go:51 user=21051 slug=mojio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.92282095Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.922494234Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.922430378Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.922257909Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.921825533Z caller=grafana.go:247 user=309706 slug=felfel msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=3 alerts=0 + logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(eadp.gos.torch.prod.anthem-1-xone.Gameplay_Users,3) Query" t=2024-05-29T13:44:13.921823098Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="name=keepLastValue(eadp.gos.torch.prod.anthem-1-xone.Gameplay_Users,3) Query" t=2024-05-29T13:44:13.921811871Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324600laio1euw2, cloud_platform=AWS, customer_id=C751, env_id=324600, env_name=C751_Prod_mmflowers_U11, env_type=prod, instance=env-324600laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:13.921710461Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.921598238Z caller=remote_instance_store.go:51 
user=316418 slug=workmotion msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/tenants, Stage=--" t=2024-05-29T13:44:13.921473015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/tenants, Stage=--" t=2024-05-29T13:44:13.921459023Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/tenants, Stage=--" t=2024-05-29T13:44:13.921450157Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:13.921373089Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324598laio1use1, cloud_platform=AWS, customer_id=C633, env_id=324598, env_name=C633 Disney SBX U11, env_type=sandbox, instance=env-324598laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.921284896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324580laioeuc1, cloud_platform=AWS, customer_id=C746, env_id=324580, env_name=C746 SAINT HERBLAIN U11, env_type=prod, instance=env-324580laioeuc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.921121509Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324576laio1use1, cloud_platform=AWS, customer_id=C333, env_id=324576, env_name=c333_Mercer_QA_U11, env_type=qa, instance=env-324576laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.920954238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324576laio1use1, cloud_platform=AWS, customer_id=C333, env_id=324576, env_name=c333_Mercer_QA_U11, env_type=qa, instance=env-324576laio1use1, job=integrations/node_exporter, region=us-east-1, stage=testing" t=2024-05-29T13:44:13.920937746Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.920869786Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nyyvue70-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.920617606Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=A,D" t=2024-05-29T13:44:13.920609592Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager 
user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=A,D" t=2024-05-29T13:44:13.920596963Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.920517949Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nysd4e81-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.920470434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nysd4e81-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.920434804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=485459 slug=heroiclabs t=2024-05-29T13:44:13.920446584Z level=debug msg="Saving alert states done" count=62 max_state_save_concurrency=1 duration=1.145509867s + level=debug ts=2024-05-29T13:44:13.920207194Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nyqln5ka-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.920199331Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=63699 slug=bizzydist t=2024-05-29T13:44:13.920230552Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324540laio1euw1, cloud_platform=AWS, customer_id=C637, env_id=324540, env_name=C637 Pfizer EU AI Pilot, env_type=sandbox, instance=env-324540laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.920251074Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.920209116Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.920075881Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nyfsqtvh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.92004652Z level=debug msg="Keeping state" state=Normal + 
level=debug ts=2024-05-29T13:44:13.920014151Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.919979337Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nyfsqtvh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919955599Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=408734 slug=mmrresearch t=2024-05-29T13:44:13.919799229Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=408734 slug=mmrresearch instance= t=2024-05-29T13:44:13.919777857Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=408734 slug=mmrresearch instance= t=2024-05-29T13:44:13.91977077Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:13.919922743Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.99658ms + logger=ngalert.state.manager user=408734 slug=mmrresearch instance= t=2024-05-29T13:44:13.919757273Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nyfsqtvh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919942479Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nyfsqtvh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919866798Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.919802009Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.919820521Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nybcyt4s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919624436Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.91920112Z 
caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxybafuq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919439564Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxybafuq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919407083Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.919583241Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxwkz3nm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919252302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxwkz3nm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.919180211Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.919614801Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324490laio1aps2, cloud_platform=AWS, customer_id=C730, env_id=324490, env_name=c730 NEW UoA PROD, env_type=prod, instance=env-324490laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:13.919535764Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.919448722Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.919210746Z caller=client.go:80 msg="creating client for grafana instance" user=543999 addr=dns:///jamitlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.919144146Z caller=ruler.go:522 msg="tenant is owned by this instance" user=542315 slug=hlsb groups=0 + level=debug ts=2024-05-29T13:44:13.919152179Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug 
ts=2024-05-29T13:44:13.918919396Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxwgxakv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.918968139Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324483laio1use1, cloud_platform=AWS, customer_id=C631, env_id=324483, env_name=C631 The Bay QA, env_type=qa, instance=env-324483laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.918890376Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.9188387Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.918780531Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324483laio1use1, cloud_platform=AWS, customer_id=C631, env_id=324483, env_name=C631 The Bay QA, env_type=qa, instance=env-324483laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.918873179Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.91872414Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.918649717Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxsublql-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.918600565Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.918453708Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=320778 slug=omegaai version=11 fingerprint=2ee806fbaa4a3fb7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.918456859Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=k3-C8xH4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.918026002s EvaluationString:}]" duration=103.72655ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxsublql-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.918568805Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxq8f1v4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.918366763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxq8f1v4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.918334602Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxnwfq2q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.918286632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxnwfq2q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.918166971Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxnwfq2q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.9180663Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.918061535Z caller=ruler.go:522 msg="tenant is owned by this instance" user=607509 slug=henokv groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxmauh21-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.917814627Z level=debug msg="Keeping state" state=Normal + level=error ts=2024-05-29T13:44:13.91769219Z caller=remote_rule_evaluator.go:110 user=481110 slug=g123 msg="remote evaluate failed" code=Code(422) err="condition C does not exist, must be one of [A]" + logger=ngalert.scheduler user=481110 slug=g123 version=1 fingerprint=59fdc921a0f784b6 attempt=1 now=2024-05-29T13:44:10Z 
t=2024-05-29T13:44:13.917731064Z level=error msg="Failed to evaluate rule" error="condition C does not exist, must be one of [A]" duration=1.093697ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324403laio1euc1, cloud_platform=AWS, customer_id=C654, env_id=324403, env_name=C654 New QA Swift Iris, env_type=qa, instance=env-324403laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.917765895Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="__name__=agent_config_last_load_successful, agent_hostname=ip-10-75-74-192, env=prd-prd, host=ip-10-75-74-192, instance=ip-10-75-74-192:12345, job=integrations/agent, region=eu-central-1, stack=mdcb" t=2024-05-29T13:44:13.915103747Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxl9g1wo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.917669675Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="__name__=agent_config_last_load_successful, agent_hostname=ip-10-75-74-192, env=prd-prd, host=ip-10-75-74-192, instance=ip-10-75-74-192:12345, job=integrations/agent, region=eu-central-1, stack=mdcb" t=2024-05-29T13:44:13.915095864Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.91766186Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.917622731Z caller=client.go:80 msg="creating client for grafana instance" user=799801 addr=dns:///ivercloud-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=159532 slug=getfabric instance="__name__=up, app_kubernetes_io_instance=vault, app_kubernetes_io_name=vault, cluster=fabric-prod, component=server, controller_revision_hash=vault-7797779c5c, domain=services, env=prod, helm_sh_chart=vault-0.27.0, instance=10.1.6.42:8200, job=fabric/endpoints, mfc=FABRIC-PROD, pod_name=vault-1, source=vault-internal, statefulset_kubernetes_io_pod_name=vault-1" t=2024-05-29T13:44:13.917460492Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxl9g1wo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.917476423Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.917430105Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=159532 slug=getfabric instance="__name__=up, app_kubernetes_io_instance=vault, app_kubernetes_io_name=vault, cluster=fabric-prod, component=server, 
controller_revision_hash=vault-7797779c5c, domain=services, env=prod, helm_sh_chart=vault-0.27.0, instance=10.1.6.42:8200, job=fabric/endpoints, mfc=FABRIC-PROD, pod_name=vault-1, source=vault-internal, statefulset_kubernetes_io_pod_name=vault-1" t=2024-05-29T13:44:13.917443509Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="__name__=agent_config_last_load_successful, agent_hostname=ip-10-75-66-25, env=prd-prd, host=ip-10-75-66-25, instance=ip-10-75-66-25:12345, job=integrations/agent, region=eu-central-1, stack=mdcb" t=2024-05-29T13:44:13.915068233Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="__name__=agent_config_last_load_successful, agent_hostname=ip-10-21-82-103, env=prd-prd, host=ip-10-21-82-103, instance=ip-10-21-82-103:12345, job=integrations/agent, region=us-east-1, stack=mdcb" t=2024-05-29T13:44:13.91502952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxkms5wo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.917346422Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=cinext-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.13.20:9090, job=prometheus-k8s, k8s_cluster=cinext-01, kubernetes_cluster=cinext-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cinext-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push" t=2024-05-29T13:44:13.917371522Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:13.917329033Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service=prometheus-k8s" + logger=ngalert.state.manager user=112387 slug=lucidhq instance="__name__=agent_config_last_load_successful, agent_hostname=ip-10-21-70-245, env=prd-prd, host=ip-10-21-70-245, instance=ip-10-21-70-245:12345, job=integrations/agent, region=us-east-1, stack=mdcb" t=2024-05-29T13:44:13.914945865Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=159532 slug=getfabric instance="__name__=up, app_kubernetes_io_instance=vault, app_kubernetes_io_name=vault, cluster=fabric-prod, component=server, controller_revision_hash=vault-7797779c5c, domain=services, env=prod, helm_sh_chart=vault-0.27.0, instance=10.1.6.42:8200, job=fabric/endpoints, mfc=FABRIC-PROD, pod_name=vault-1, source=vault, statefulset_kubernetes_io_pod_name=vault-1" t=2024-05-29T13:44:13.917357122Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=471861 slug=planetstaging version=1 fingerprint=d800b18028979f8a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.917161623Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=cinext-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.13.20:9090, job=prometheus-k8s, k8s_cluster=cinext-01, kubernetes_cluster=cinext-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cinext-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=cinext-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.13.20:9090, job=prometheus-k8s, k8s_cluster=cinext-01, kubernetes_cluster=cinext-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cinext-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push Value:0xc0579842d8} C:{Var:C Labels:cluster=cinext-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.13.20:9090, job=prometheus-k8s, k8s_cluster=cinext-01, kubernetes_cluster=cinext-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cinext-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push Value:0xc057984510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.916547372s EvaluationString:[ var='B' labels={cluster=cinext-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.13.20:9090, job=prometheus-k8s, k8s_cluster=cinext-01, kubernetes_cluster=cinext-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cinext-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push} value=1 ], [ var='C' labels={cluster=cinext-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.13.20:9090, job=prometheus-k8s, k8s_cluster=cinext-01, kubernetes_cluster=cinext-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cinext-01-0, remote_name=b25045, service=prometheus-k8s, 
url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push} value=0 ]} {Instance:cluster=cistable-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.22.8:9090, job=prometheus-k8s, k8s_cluster=cistable-01, kubernetes_cluster=cistable-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cistable-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:cluster=cistable-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.22.8:9090, job=prometheus-k8s, k8s_cluster=cistable-01, kubernetes_cluster=cistable-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cistable-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push Value:0xc0579849c8} C:{Var:C Labels:cluster=cistable-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.22.8:9090, job=prometheus-k8s, k8s_cluster=cistable-01, kubernetes_cluster=cistable-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cistable-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push Value:0xc057984c70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.916576234s EvaluationString:[ var='B' labels={cluster=cistable-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.22.8:9090, job=prometheus-k8s, k8s_cluster=cistable-01, kubernetes_cluster=cistable-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cistable-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push} value=1 ], [ var='C' labels={cluster=cistable-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.22.8:9090, job=prometheus-k8s, k8s_cluster=cistable-01, kubernetes_cluster=cistable-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cistable-01-0, remote_name=b25045, service=prometheus-k8s, url=https://prometheus-dedicated-30-prod-us-central-0.grafana.net/api/prom/push} value=0 ]}]" duration=31.410665ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxkms5wo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.917304482Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.917267422Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=159532 slug=getfabric instance="__name__=up, app_kubernetes_io_instance=vault, app_kubernetes_io_name=vault, cluster=fabric-prod, component=server, controller_revision_hash=vault-7797779c5c, domain=services, env=prod, helm_sh_chart=vault-0.27.0, 
instance=10.1.2.42:8200, job=fabric/endpoints, mfc=FABRIC-PROD, pod_name=vault-2, source=vault-internal, statefulset_kubernetes_io_pod_name=vault-2" t=2024-05-29T13:44:13.91725186Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxkms5wo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.91714468Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxkms5wo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.91711478Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=159532 slug=getfabric instance="__name__=up, app_kubernetes_io_instance=vault, app_kubernetes_io_name=vault, cluster=fabric-prod, component=server, controller_revision_hash=vault-7797779c5c, domain=services, env=prod, helm_sh_chart=vault-0.27.0, instance=10.1.2.42:8200, job=fabric/endpoints, mfc=FABRIC-PROD, pod_name=vault-2, source=vault, statefulset_kubernetes_io_pod_name=vault-2" t=2024-05-29T13:44:13.917099327Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159532 slug=getfabric instance="__name__=up, app_kubernetes_io_instance=vault, app_kubernetes_io_name=vault, cluster=fabric-prod, component=server, controller_revision_hash=vault-7797779c5c, domain=services, env=prod, helm_sh_chart=vault-0.27.0, instance=10.1.2.42:8200, job=fabric/endpoints, mfc=FABRIC-PROD, pod_name=vault-2, source=vault, statefulset_kubernetes_io_pod_name=vault-2" t=2024-05-29T13:44:13.917072137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxeyg59r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.917028699Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxeyg59r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.916957168Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=159532 slug=getfabric instance="__name__=up, app_kubernetes_io_instance=vault, app_kubernetes_io_name=vault, cluster=fabric-prod, component=server, controller_revision_hash=vault-7797779c5c, domain=services, env=prod, 
helm_sh_chart=vault-0.27.0, instance=10.1.1.13:8200, job=fabric/endpoints, mfc=FABRIC-PROD, pod_name=vault-0, source=vault-internal, statefulset_kubernetes_io_pod_name=vault-0" t=2024-05-29T13:44:13.91688228Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxbs0o7e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.916827927Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-324055laio1use1, cloud_platform=AWS, customer_id=C348, env_id=324055, env_name=C348 CWHH DEV, env_type=dev, instance=env-324055laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.916817752Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxbs0o7e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.916792346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nxbs0o7e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.916689925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159532 slug=getfabric t=2024-05-29T13:44:13.916528727Z level=debug msg="State manager processing evaluation results" resultCount=6 + level=info ts=2024-05-29T13:44:13.916516444Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.83.87:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bee2350c-25b6-4701-aae3-b2618e9f526c alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nx8ct5cg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.916507383Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.91645192Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=493615 slug=iiotsystems + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-nx8ct5cg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.916413522Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nx7boz5s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.916275861Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nx7boz5s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.91621834Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.915927315Z caller=ruler.go:522 msg="tenant is owned by this instance" user=661236 slug=hypernetica groups=0 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323829laio1cac1, cloud_platform=AWS, customer_id=C636, env_id=323829, env_name=C636_UAT_GOEASY_PARALLEL, env_type=qa, instance=env-323829laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.915963678Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323829laio1cac1, cloud_platform=AWS, customer_id=C636, env_id=323829, env_name=C636_UAT_GOEASY_PARALLEL, env_type=qa, instance=env-323829laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.915949878Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.915909593Z caller=remote_instance_store.go:51 user=473762 slug=intentiq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.915829773Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.915852146Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=473762 slug=intentiq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.915820241Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.915620552Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.915782339Z caller=remote_instance_store.go:51 user=451750 slug=amadeuspfpprod msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.91560222Z caller=remote_alert_sender.go:94 user=177465 slug=fairtiq host=fairtiq-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.115.18:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adlo3vilzdkhsa alerts=1 + logger=ngalert.state.manager user=412779 
slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323793laio1use1, cloud_platform=AWS, customer_id=C699, env_id=323793, env_name=C699 Compucom DEV, env_type=dev, instance=env-323793laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.915763893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nww8rls3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.915716355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323771laio1use2, cloud_platform=AWS, customer_id=C571, env_id=323771, env_name=C571 PROD Ventura Foods, env_type=prod, instance=env-323771laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.915590995Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwp1r4ai-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.915526983Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.915501711Z caller=ruler.go:522 msg="tenant is owned by this instance" user=699124 slug=hies groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwo4jxrd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.915430512Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.915322805Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323667laio1use1, cloud_platform=AWS, customer_id=C459, env_id=323667, env_name=C459_AZ_US_DEV, env_type=dev, instance=env-323667laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.91539804Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.91537831Z caller=ruler.go:522 msg="tenant is owned by this instance" user=609063 slug=geasft groups=1 + level=info component=discovery ts=2024-05-29T13:44:13.915269309Z caller=client.go:80 msg="creating client for grafana instance" user=690119 addr=dns:///isolvtech-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.915047507Z caller=ruler.go:522 msg="tenant is owned by this instance" user=548162 slug=htmthehague groups=0 
+ level=warn ts=2024-05-29T13:44:13.915114908Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=665540 slug=habeep + level=debug ts=2024-05-29T13:44:13.915081407Z caller=ruler.go:522 msg="tenant is owned by this instance" user=665540 slug=habeep groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwlkyqwj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.915069628Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.915012507Z caller=client.go:80 msg="creating client for grafana instance" user=506163 addr=dns:///ismailbay-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.915060409Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwlkyqwj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.915040688Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwkzgkji-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.914999758Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323452laio1use1, cloud_platform=AWS, customer_id=C437, env_id=323452, env_name=C437_DPSG_PROD, env_type=prod, instance=env-323452laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.914828922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=177465 slug=fairtiq t=2024-05-29T13:44:13.914646264Z level=debug msg="Saving alert states done" count=7 max_state_save_concurrency=1 duration=82.55319ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323431laio1euc1, cloud_platform=AWS, customer_id=C535, env_id=323431, env_name=C535_HRS_Prod, env_type=prod, instance=env-323431laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.914639235Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.914469006Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwihn0gb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.914501133Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.914428101Z caller=client.go:80 msg="creating client for grafana instance" user=408157 addr=dns:///ipcsmanagedaccountspov-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.914382034Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.914415329Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwihn0gb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.914383271Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.914361372Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=000000020, ref_id=A,B" t=2024-05-29T13:44:13.914363101Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=info component=discovery ts=2024-05-29T13:44:13.9143076Z caller=client.go:80 msg="creating client for grafana instance" user=687789 addr=dns:///intergrid-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.9142632Z caller=ruler.go:522 msg="tenant is owned by this instance" user=612197 slug=horsadevlab groups=0 + level=debug ts=2024-05-29T13:44:13.914266826Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=327842 slug=exabeam instance= t=2024-05-29T13:44:13.914314704Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nwez8e6w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.91426535Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.91422716Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.914214292Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.914221342Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.914206421Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:13.914126198Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=5c69b2976b5ec0e8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.914098212Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.91375944s EvaluationString:}]" duration=169.005078ms + level=warn ts=2024-05-29T13:44:13.914089698Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=506324 slug=hydrorein + level=debug ts=2024-05-29T13:44:13.914058198Z caller=ruler.go:522 msg="tenant is owned by this instance" user=506324 slug=hydrorein groups=0 + level=debug ts=2024-05-29T13:44:13.913979437Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nw3tpwj8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.913982657Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.913946368Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nw0m9xff-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.913858696Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.913704916Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=home-blender-worker, pod=home-blender-worker-687dfcdcb7-fhq7f" t=2024-05-29T13:44:13.913663916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvwt23vg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.913574683Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.913493699Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvwt23vg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.913530963Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvwt23vg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.913497222Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvwt23vg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.913453802Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323157laio1use1, cloud_platform=AWS, customer_id=C481, env_id=323157, env_name=C481 Pfizer QA, env_type=qa, instance=env-323157laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.913334692Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvvz24pn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.913350631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvvz24pn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.91329317Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvvz24pn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.91325697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.913188003Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.536097ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvvz24pn-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.913180949Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323003laio1euc1, cloud_platform=AWS, customer_id=C535, env_id=323003, env_name=C535_HRS_DEV_U10, env_type=dev, instance=env-323003laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.912960922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-323003laio1euc1, cloud_platform=AWS, customer_id=C535, env_id=323003, env_name=C535_HRS_DEV_U10, env_type=dev, instance=env-323003laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.912943128Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvqcm2sn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.912857386Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvqcm2sn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.912743305Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.912698985Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=554756 slug=hellemahallum + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvqcm2sn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.912695824Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvqcm2sn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.912665184Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvntyzws-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.912555863Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.912408082Z caller=ruler.go:522 msg="tenant is owned by this instance" user=554133 slug=hmesa groups=0 + logger=ngalert.state.historian backend=loki user=374423 slug=bitburst t=2024-05-29T13:44:13.912509146Z level=debug msg="Done saving alert state history batch" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvntyzws-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.912481812Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.912459082Z caller=client.go:80 msg="creating client for grafana instance" user=500645 addr=dns:///infodation-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322729laio1cac1, cloud_platform=AWS, customer_id=C636, env_id=322729, env_name=C636_DEV_GoEasy_Parallel, env_type=dev, instance=env-322729laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.912438876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322729laio1cac1, cloud_platform=AWS, customer_id=C636, env_id=322729, env_name=C636_DEV_GoEasy_Parallel, env_type=dev, instance=env-322729laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.912422225Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322672laio1euw1, cloud_platform=AWS, customer_id=C454, env_id=322672, env_name=C454_AZ_UK_Prod_U10, env_type=prod, instance=env-322672laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.91207115Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322672laio1euw1, cloud_platform=AWS, customer_id=C454, env_id=322672, env_name=C454_AZ_UK_Prod_U10, env_type=prod, instance=env-322672laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.912055192Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvlw1mgc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.911991147Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvlw1mgc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.911961907Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322667laio1use1, cloud_platform=AWS, customer_id=C626, env_id=322667, env_name=C626 Shoe Carnival DEV, env_type=dev, instance=env-322667laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.911872605Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.911686775Z caller=ruler.go:522 msg="tenant is owned by this instance" user=714361 slug=heimdall4nn groups=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322648laio1aps2, cloud_platform=AWS, customer_id=C671, env_id=322648, env_name=C671_Kmart_DEV, env_type=dev, instance=env-322648laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:13.911700453Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvg4eja1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.911670804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322500laio1usw2, cloud_platform=AWS, customer_id=C723, env_id=322500, env_name=C723 DirecTV Prod, env_type=prod, instance=env-322500laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:13.911520153Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.911506516Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvg4eja1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.911457471Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.911322672Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=528915 slug=grundsteine + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvd349de-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.911376171Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322497laio1use1, cloud_platform=AWS, customer_id=C442, env_id=322497, env_name=C442_NCB_Prod, env_type=prod, instance=env-322497laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.91129104Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvcf21iu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.911182679Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.911083769Z caller=client.go:80 msg="creating client for grafana instance" user=539034 addr=dns:///ilenkradgps-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvcf21iu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.911073957Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.911038369Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=751315 slug=hellebore + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nvcf21iu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.910996817Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=21051 slug=mojio instance="datasource_uid=5qNiDjvMz, ref_id=A" t=2024-05-29T13:44:13.911012692Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nv8q0bsx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.910922846Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=698103 slug=vericast t=2024-05-29T13:44:13.910806546Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=698103 slug=vericast version=4 fingerprint=df1705e6592a17e3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.910730793Z level=debug msg="Alert rule evaluated" 
results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.910166849s EvaluationString:}]" duration=341.204475ms + level=debug ts=2024-05-29T13:44:13.910777302Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=191103 slug=amazonadmin version=39 fingerprint=867919fd68ec6233 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.910663566Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.910462111s EvaluationString:}]" duration=173.189857ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nv8fcs15-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.910519942Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nv8fcs15-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.910405921Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.9101825Z caller=grafana.go:247 user=251869 slug=roseburg msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=d7e7eb95-4f29-4d63-82cd-2a28848a60ec" groups=0 alerts=0 + logger=ngalert.state.manager.persist user=819809 slug=sprinter t=2024-05-29T13:44:13.910195998Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.910136674Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nv0cy8bx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.910094667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:13.910029223Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.910004625Z caller=grafana.go:247 user=251869 slug=roseburg msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=d7e7eb95-4f29-4d63-82cd-2a28848a60ec" groups=0 alerts=0 + level=debug ts=2024-05-29T13:44:13.910013618Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nv0cy8bx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909919056Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.909888501Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=806229 slug=simplisafe version=71 fingerprint=30fa8a9222895b02 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.909746459Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[CRON_HEARTBEAT:{Var:CRON_HEARTBEAT Labels: Value:0xc02551e8c0} MINIMUM:{Var:MINIMUM Labels: Value:0xc02551e898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.909119557s EvaluationString:[ var='CRON_HEARTBEAT' labels={} value=1.002824858757062 ], [ var='MINIMUM' labels={} value=0 ]}]" duration=14.371522ms + logger=ngalert.scheduler user=819809 slug=sprinter version=19 fingerprint=2849c41391779bd2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.909844215Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.909329797s EvaluationString:}]" duration=25.779705ms + level=debug ts=2024-05-29T13:44:13.909858677Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nv0cy8bx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909855005Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.909812467Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322250laio1euw1, cloud_platform=AWS, customer_id=C454, env_id=322250, env_name=C454_Test_Paralell_u10, env_type=test, instance=env-322250laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.909753057Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nuzdqukp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909706133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nuzdqukp-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909593342Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322246laio1euw1, cloud_platform=AWS, customer_id=C454, env_id=322246, env_name=C454_AZ_UK_Dev_U10, env_type=dev, instance=env-322246laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.909599186Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=701741 slug=thetradingpitproduction t=2024-05-29T13:44:13.909556128Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.040098ms
+ level=info component=discovery ts=2024-05-29T13:44:13.909503955Z caller=client.go:80 msg="creating client for grafana instance" user=493615 addr=dns:///iiotsystems-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager.persist user=451223 slug=amadeuspfptest t=2024-05-29T13:44:13.909449661Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.939888ms
+ level=info ts=2024-05-29T13:44:13.909469465Z caller=remote_alert_sender.go:94 user=112732 slug=gleamer host=gleamer-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.180.90:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdg8qu0zu04cgf alerts=1
+ level=debug ts=2024-05-29T13:44:13.909447154Z caller=ruler.go:522 msg="tenant is owned by this instance" user=731824 slug=greenvalleyit groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nutebesa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909474221Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nutebesa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909445791Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322244laio1use1, cloud_platform=AWS, customer_id=C485, env_id=322244, env_name=C485 Ralph Lauren UAT, env_type=test, instance=env-322244laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.909423323Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.909343965Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=700783 slug=gsgmedia t=2024-05-29T13:44:13.909394888Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.909367947Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.909360607Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+ logger=ngalert.state.manager user=700783 slug=gsgmedia instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.909348367Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=700783 slug=gsgmedia t=2024-05-29T13:44:13.909333686Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.909224397Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nuoskpos-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909225108Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nuoskpos-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909193538Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-numrzr67-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.909073287Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.909075648Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322218laio1use1, cloud_platform=AWS, customer_id=C737, env_id=322218, env_name=C737 Road Scholar Dev, env_type=dev, instance=env-322218laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.90901385Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.909005689Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-numrzr67-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.908866495Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nuil3mtw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.908747444Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.908700747Z caller=ruler.go:522 msg="tenant is owned by this instance" user=485797 slug=gconnect groups=5
+ level=info component=discovery ts=2024-05-29T13:44:13.908691747Z caller=client.go:80 msg="creating client for grafana instance" user=661236 addr=dns:///hypernetica-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.908663747Z caller=ruler.go:522 msg="tenant is owned by this instance" user=608977 slug=geaiothrt groups=10
+ level=debug ts=2024-05-29T13:44:13.908610646Z caller=ruler.go:522 msg="tenant is owned by this instance" user=560725 slug=gunnebodtc groups=0
+ level=debug ts=2024-05-29T13:44:13.908644957Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.908588846Z caller=ruler.go:522 msg="tenant is owned by this instance" user=889702 slug=gamnifytest groups=1
+ logger=ngalert.state.manager user=344017 slug=descript instance= t=2024-05-29T13:44:13.908583844Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nugy4hiy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.908571582Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=344017 slug=descript version=24 fingerprint=151624e411ec5bfa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.90846093Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[AlertFailure:{Var:AlertFailure Labels: Value:0xc01addbb78} LastSuccessRate:{Var:LastSuccessRate Labels: Value:0xc01addbbd0} SuccessRate:{Var:SuccessRate Labels: Value:0xc01addbbd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.908082592s EvaluationString:[ var='AlertFailure' labels={} value=0 ], [ var='LastSuccessRate' labels={} value=1 ], [ var='SuccessRate' labels={} value=1 ]}]" duration=17.094858ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322081laio1use1, cloud_platform=AWS, customer_id=C459, env_id=322081, env_name=C459_AZ_US_PROD, env_type=prod, instance=env-322081laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.908546806Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.908509717Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.908481803Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nugy4hiy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.908474751Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.908443809Z caller=remote_instance_store.go:51 user=884866 slug=cnonumerique msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=ddhkbrfewv7k0d, ref_id=A" t=2024-05-29T13:44:13.908381548Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:13.908354863Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nu8hgcy5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.908196668Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.908110033Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nu44q3f8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.908089427Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-322014laio1use1, cloud_platform=AWS, customer_id=C675, env_id=322014, env_name=C675 Rag & Bone - PROD, env_type=prod, instance=env-322014laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.908022074Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nu44q3f8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.908012366Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.90797494Z caller=client.go:80 msg="creating client for grafana instance" user=644538 addr=dns:///huaji-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntyg0tfk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.907849574Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.907729939Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:13.907738938Z caller=client.go:80 msg="creating client for grafana instance" user=548162 addr=dns:///htmthehague-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.907588676Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntydh48h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.907673423Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321956laio1aps2, cloud_platform=AWS, customer_id=C515, env_id=321956, env_name=C515 Medibank AUS Prod, env_type=prod, instance=env-321956laio1aps2, job=integrations/node_exporter, region=ap-southeast-2, stage=live" t=2024-05-29T13:44:13.907639489Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntydh48h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.907571382Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.907405568Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.907342334Z caller=ruler.go:522 msg="tenant is owned by this instance" user=486818 slug=geaiottest groups=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntucbjvu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.907255228Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.90715547Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321795laio1use1, cloud_platform=AWS, customer_id=C724, env_id=321795, env_name=C724 Travelers PI Dev, env_type=dev, instance=env-321795laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.907187969Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntq77erp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.907128837Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntq77erp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.907017706Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntnl1hq1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906974185Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321794laio1use1, cloud_platform=AWS, customer_id=C724, env_id=321794, env_name=C724 Travelers CL Dev, env_type=dev, instance=env-321794laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.906968627Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.906955245Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.906836629Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=620860 slug=grafanacloud1105
+ level=debug ts=2024-05-29T13:44:13.906815229Z caller=ruler.go:522 msg="tenant is owned by this instance" user=620860 slug=grafanacloud1105 groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntnl1hq1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906710173Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntnl1hq1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906677142Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntlwx3c7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906600292Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntlwx3c7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.90647821Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.906359281Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nthrjids-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906331179Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.90622102Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.906070429Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.906137523Z caller=ruler.go:522 msg="tenant is owned by this instance" user=543110 slug=gptest groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntgqpv8n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906090656Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntgqpv8n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906055476Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.905934855Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntgqpv8n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.906004725Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.90590852Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321782laio1use1, cloud_platform=AWS, customer_id=C485, env_id=321782, env_name=C485 Ralph Lauren DEV, env_type=dev, instance=env-321782laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.906020735Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:13.905976428Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.90590612Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=1b9588d2603ef0dd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.905890717Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.905577382s EvaluationString:}]" duration=18.573309ms
+ level=debug ts=2024-05-29T13:44:13.905918206Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ntgi7kw0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.905889854Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.905787719Z caller=ruler.go:522 msg="tenant is owned by this instance" user=655751 slug=gelectric groups=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321780laio1use1, cloud_platform=AWS, customer_id=C318, env_id=321780, env_name=C318 Kohls QA, env_type=qa, instance=env-321780laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.90580583Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.905587762Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.90555887Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.905531982Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=340731 slug=duckburg version=10 fingerprint=cd5cb56679778477 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.9054922Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-usage, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.905283807s EvaluationString:}]" duration=11.481885ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nt9u5sbj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.905568921Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nt9u5sbj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.905536701Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nt8ryim1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.90545535Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.905398639Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.905440243Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nt8ryim1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.905328019Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.90539965Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nt2e490o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.905218167Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nt2e490o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.905186267Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321713laio1use1, cloud_platform=AWS, customer_id=C724, env_id=321713, env_name=C724 Travelers PI DISC, env_type=disc, instance=env-321713laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.905326475Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=430961 slug=solifi version=2 fingerprint=14aea33a073c897d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.90529919Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=f901086b-e83c-4767-8689-f9c5848eaf68, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.904955414s EvaluationString:}]" duration=36.632603ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nt0b1eu8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.904906634Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsvw746n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.904719392Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsmk2mv4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.90449891Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsmk2mv4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.90445805Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=459086 slug=metricgamingprd instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.905145946Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsmk2mv4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.904443539Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nskb4g3f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.904402179Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=459086 slug=metricgamingprd instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.905119982Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321618laio1use1, cloud_platform=AWS, customer_id=C485, env_id=321618, env_name=C485 Ralph Lauren PROD, env_type=prod, instance=env-321618laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.905125988Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:13.905107913Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=646659 slug=grafanaback4prod
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.905096578Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsigr7bm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903937934Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsa5biqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903859533Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.904978458Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsa5biqr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903763522Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nsa5biqr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903594251Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns7uod30-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.90353135Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=aa357fad915755bb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.904855266Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=RBZj4Ak4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.904449585s EvaluationString:}]" duration=26.567262ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns7uod30-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.9035025Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns7uod30-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903467419Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns7uod30-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903441279Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns7iuh8g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903329298Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns7iuh8g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903173026Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns7iuh8g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903139276Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321556laio1use1, cloud_platform=AWS, customer_id=C569, env_id=321556, env_name=C569 Rite Hite PROD, env_type=prod, instance=env-321556laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.904902417Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns0fszkz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903096646Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ns0fszkz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.903025925Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nruce5xw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902901744Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.904770852Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrsdeu4q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902599291Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.904590514Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrqskgn0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902441549Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrqskgn0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902411119Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.90462056Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrqskgn0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902400489Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.904622008Z caller=client.go:80 msg="creating client for grafana instance" user=607509 addr=dns:///henokv-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.904589408Z caller=remote_instance_store.go:51 user=172772 slug=ppbtradingtribe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrniscce-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902333948Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrniscce-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902323718Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:13.904535317Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.904435407Z caller=ruler.go:522 msg="tenant is owned by this instance" user=543386 slug=girlkingboy groups=0
+ logger=ngalert.state.manager user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:13.904459488Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.904452013Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrn8fmp8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902210417Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrn8fmp8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902198156Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321540laio1eastus, cloud_platform=AZURE, customer_id=A009, env_id=env-321540, env_name=a009_pkempinski_dnf_test, env_type=prod, instance=env-321540laio1eastus, job=integrations/node_exporter, region=eastus" t=2024-05-29T13:44:13.904398517Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=info component=discovery ts=2024-05-29T13:44:13.904361906Z caller=client.go:80 msg="creating client for grafana instance" user=519196 addr=dns:///heller-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrgtzrgr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.902055465Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nre5952e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901869543Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nre5952e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901836033Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321501laio1use2, cloud_platform=AWS, customer_id=C734, env_id=321501, env_name=C734 GoEasy DR, env_type=prod, instance=env-321501laio1use2, job=integrations/node_exporter, region=us-east-2, stage=preprod" t=2024-05-29T13:44:13.904186386Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.90396221Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321452laio1use1, cloud_platform=AWS, customer_id=C553, env_id=321452, env_name=C553_PWCS_DEV, env_type=dev, instance=env-321452laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.903820815Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.9037174Z caller=ruler.go:522 msg="tenant is owned by this instance" user=514757 slug=gacraid groups=0
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321452laio1use1, cloud_platform=AWS, customer_id=C553, env_id=321452, env_name=C553_PWCS_DEV, env_type=dev, instance=env-321452laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.903800752Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.903662599Z caller=client.go:80 msg="creating client for grafana instance" user=751315 addr=dns:///hellebore-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.903768038Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321451laio1use1, cloud_platform=AWS, customer_id=C553, env_id=321451, env_name=C553_PWCS_PROD, env_type=prod, instance=env-321451laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.903617194Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.90346015Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.903390963Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.903377541Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.903350469Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321372laio1euc1, cloud_platform=AWS, customer_id=C582, env_id=321372, env_name=C582_Legero_prod, env_type=prod, instance=env-321372laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.903391796Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.903291418Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.903067795Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321216laio1use1, cloud_platform=AWS, customer_id=C628, env_id=321216, env_name=c628_MSTR_Tutorial_PROD, env_type=prod, instance=env-321216laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.903184543Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.903006389Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.903063511Z caller=remote_instance_store.go:51 user=843304 slug=ppcgroup msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=843304 slug=ppcgroup instance="datasource_uid=adisl0571ei2od, ref_id=CPU %" t=2024-05-29T13:44:13.90300171Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=843304 slug=ppcgroup instance="datasource_uid=adisl0571ei2od, ref_id=CPU %" t=2024-05-29T13:44:13.90298711Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager.persist user=66104 slug=messagegears t=2024-05-29T13:44:13.902999983Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=66104 slug=messagegears instance= t=2024-05-29T13:44:13.902983546Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=66104 slug=messagegears version=1 fingerprint=8abdb53f0e3e7c2b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.902852027Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.902565602s EvaluationString:}]" duration=145.948044ms
+ level=debug ts=2024-05-29T13:44:13.902899695Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrdxqkpx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901723102Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nrdxqkpx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901646021Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nr4ph7pu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.90161513Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321171laio1use1, cloud_platform=AWS, customer_id=C724, env_id=321171, env_name=C724 Travelers PI SBX, env_type=dev, instance=env-321171laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.902842067Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nr4ph7pu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901492819Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqyl99ed-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901313977Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.902668549Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqyl99ed-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901233927Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:13.902616545Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.scheduler user=396586 slug=opengov version=19 fingerprint=c110f2e9eb6c9d1d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.902488001Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc035efe840} C:{Var:C Labels: Value:0xc035efe848}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.90213061s EvaluationString:[ var='B' labels={} value=14 ], [ var='C' labels={} value=0 ]}]" duration=203.079485ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqr7y2yj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901191456Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321155laio1use1, cloud_platform=AWS, customer_id=C522, env_id=321155, env_name=C522_Stuller_Prod, env_type=prod, instance=env-321155laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.902444867Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqr7y2yj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.901107305Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.902419145Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.902167169Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.902130899Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321098laio1euw1, cloud_platform=AWS, customer_id=C637, env_id=321098, env_name=C637 Pfizer EU Dev U10, env_type=dev, instance=env-321098laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.902073159Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321098laio1euw1, cloud_platform=AWS, customer_id=C637, env_id=321098, env_name=C637 Pfizer EU Dev U10, env_type=dev, instance=env-321098laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.902054271Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.901959869Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321097laiouse2, cloud_platform=AWS, customer_id=C503, env_id=321097, env_name=C503 Van Andel PROD, env_type=prod, instance=env-321097laiouse2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.901863959Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321097laiouse2, cloud_platform=AWS, customer_id=C503, env_id=321097, env_name=C503 Van Andel PROD, env_type=prod, instance=env-321097laiouse2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.901827699Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.90170703Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.901622772Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.489748ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321095laio1cac1, cloud_platform=AWS, customer_id=C594, env_id=321095, env_name=C594 Metro DEV, env_type=dev, instance=env-321095laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=testing" t=2024-05-29T13:44:13.901677428Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.901492943Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.901493733Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.901359561Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.901202663Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.901244961Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-321092laio1use1, cloud_platform=AWS, customer_id=C705, env_id=321092, env_name=C705 Cox Ecommerce Dev, env_type=dev, instance=env-321092laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.901295163Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.901077975Z caller=ruler.go:522 msg="tenant is owned by this instance" user=548740 slug=frameit groups=0
+ level=info component=discovery ts=2024-05-29T13:44:13.900970674Z caller=client.go:80 msg="creating client for grafana instance" user=746551 addr=dns:///gudrunxpert-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=info component=discovery ts=2024-05-29T13:44:13.900915773Z caller=client.go:80 msg="creating client for grafana instance" user=750832 addr=dns:///grzegorzgluszek-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:13.900887573Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=533098 slug=gdd
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqf5k0nf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.900845643Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqf5k0nf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.900781992Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320949laio1use1, cloud_platform=AWS, customer_id=C660, env_id=320949, env_name=C660 V.P. Prod, env_type=prod, instance=env-320949laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.900702631Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqcp8alc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.900664011Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nqcp8alc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.90062459Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.900538071Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.90053977Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=741989 slug=fujiseal
+ logger=ngalert.state.manager.persist user=701741 slug=thetradingpitproduction t=2024-05-29T13:44:13.90050764Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320920laio1use1, cloud_platform=AWS, customer_id=C423, env_id=320920, env_name=C423 MMRIET PROD, env_type=prod, instance=env-320920laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.900513167Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=701741 slug=thetradingpitproduction version=34 fingerprint=b87431ae9b425bae attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.900379517Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.899909819s EvaluationString:}]" duration=18.908495ms
+ level=debug ts=2024-05-29T13:44:13.900188068Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nq4o8td5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.900128625Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.90015205Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.900136945Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy
instance="__name__=mstr_status_message_kafka, agent_hostname=env-320755laio1use1, cloud_platform=AWS, customer_id=C664, env_id=320755, env_name=C664 PROD Disney Studios, env_type=prod, instance=env-320755laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.90011536Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nq4o8td5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.900024954Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320754laio1use1, cloud_platform=AWS, customer_id=C664, env_id=320754, env_name=C664 NonProd Disney Studi, env_type=dev, instance=env-320754laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.899971231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320754laio1use1, cloud_platform=AWS, customer_id=C664, env_id=320754, env_name=C664 NonProd Disney Studi, env_type=dev, instance=env-320754laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.899953849Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.899791748Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nq0ptjbb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899815222Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320741laio1euw1, cloud_platform=AWS, customer_id=C637, env_id=320741, env_name=C637 Pfizer EU QA U10, env_type=qa, instance=env-320741laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.899761679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npx89z93-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899753961Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-npx89z93-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899709511Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npx89z93-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89965018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.899637129Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npuonfs1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89959046Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.899623767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npuonfs1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899563289Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320713laio1euc1, cloud_platform=AWS, customer_id=C712, env_id=320713, env_name=C712_HKL_Prod_u10, env_type=prod, instance=env-320713laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.89955789Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.899527454Z caller=remote_instance_store.go:51 user=173374 slug=felmo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:13.899441382Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.899390077Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=173374 slug=felmo t=2024-05-29T13:44:13.899402998Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npsvkljf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899381898Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=173374 slug=felmo version=146 fingerprint=335e84b8bb5d84b7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.899293897Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.898992369s EvaluationString:}]" duration=322.724235ms + logger=ngalert.state.manager.persist user=160939 slug=jaisonmello t=2024-05-29T13:44:13.896971205Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320677laio1euw1, cloud_platform=AWS, customer_id=C452, env_id=320677, env_name=C452_DEV_2021_U10, env_type=dev, instance=env-320677laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.899345377Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npsvkljf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899310607Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=112732 slug=gleamer t=2024-05-29T13:44:13.899230173Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:13.899304207Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npsvkljf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899256256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npqyf3mu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899197866Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npqyf3mu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899097075Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.899156295Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.899154755Z 
caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=ddm8p4opmn7k0e msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:13.899091201Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.899075567Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npqyf3mu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.899059314Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=624354 slug=truliooworkflow t=2024-05-29T13:44:13.898673788Z level=debug msg="Saving alert states" count=45 max_state_save_concurrency=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320601laio1use1, cloud_platform=AWS, customer_id=C491, env_id=320601, env_name=C491_DEV, env_type=dev, instance=env-320601laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.898999779Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898638886Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:13.898984847Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320601laio1use1, cloud_platform=AWS, customer_id=C491, env_id=320601, env_name=C491_DEV, env_type=dev, instance=env-320601laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.898983239Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898598184Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.898851954Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=559905 slug=ftr + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898594004Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898584764Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898567123Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898557082Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898522461Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:13.898947511Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=redis, namespace=argocd, pod=argocd-redis-776789c57-cjp5b" t=2024-05-29T13:44:13.898951919Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898467108Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898459046Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898451937Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:13.898854283Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898409324Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898398875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898375482Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898352952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898344532Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898320981Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320516laio1euw1, cloud_platform=AWS, customer_id=C452, env_id=320516, env_name=C452_Staging_2021U10, env_type=qa, instance=env-320516laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.898801408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.89831139Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:13.89829934Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.898787511Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898228235Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=proxy-injector, namespace=linkerd, pod=linkerd-proxy-injector-5bc6654c95-t255r" t=2024-05-29T13:44:13.898784017Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npmypewb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89868697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npmypewb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89865007Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898222976Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.898732321Z caller=remote_instance_store.go:51 user=624354 slug=truliooworkflow msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898213875Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.898537335Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.898647939Z caller=remote_image_capturer.go:33 user=309009 slug=elestyle rule_org_id=1 rule_uid=ddm8p4opmn7k0e msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898170283Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898156112Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-npmypewb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.898574309Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898102229Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898081828Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898075979Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898035837Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.898020776Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-npkb7442-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.898508409Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-svc, namespace=elepay-api, pod=oneqr-svc-7f4696cc8b-282dx" t=2024-05-29T13:44:13.89842202Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.897970983Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-np8jmwln-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.898303756Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.897918191Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-pos-web, namespace=elepay, pod=oneqr-pos-web-7cbb5b6bdb-scgst" t=2024-05-29T13:44:13.898262137Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z 
next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320364laio1use1, cloud_platform=AWS, customer_id=C569, env_id=320364, env_name=C569 Rite Hite DEV, env_type=dev, instance=env-320364laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.89823639Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320364laio1use1, cloud_platform=AWS, customer_id=C569, env_id=320364, env_name=C569 Rite Hite DEV, env_type=dev, instance=env-320364laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.898220108Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.897974946Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=685066 slug=finalist + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-kds-web, namespace=elepay, pod=oneqr-kds-web-7c9fc797dc-2hbk9" t=2024-05-29T13:44:13.898084729Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320362laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=320362, env_name=C559_SWIFT_TEST_U10, env_type=prod, instance=env-320362laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.898051386Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-np71or7u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.898016533Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320362laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=320362, env_name=C559_SWIFT_TEST_U10, env_type=prod, instance=env-320362laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.898034083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=527204 slug=lnrsusinsurancenonprod t=2024-05-29T13:44:13.897987715Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=oneqr-kds-web, namespace=elepay, pod=oneqr-kds-web-7c9fc797dc-2hbk9" t=2024-05-29T13:44:13.898028008Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-np5xlc1x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897907572Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.897842177Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=node-exporter, namespace=monitoring, pod=node-exporter-jqphl" t=2024-05-29T13:44:13.89795054Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.897821587Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-np5xlc1x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897812291Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.89742414Z caller=ruler.go:522 msg="tenant is owned by this instance" user=528531 slug=flane groups=0 + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=node-exporter, namespace=monitoring, pod=node-exporter-glm5f" t=2024-05-29T13:44:13.89786986Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-np5xlc1x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897773661Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.897746563Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=node-exporter, namespace=monitoring, pod=node-exporter-glm5f" t=2024-05-29T13:44:13.897846223Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.897737833Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, namespace=linkerd, pod=linkerd-identity-7db9fcdbcf-6lnts" t=2024-05-29T13:44:13.897810555Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.897769384Z caller=remote_instance_store.go:51 user=829352 slug=unfnbonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, 
agent_hostname=env-320296laio1eastus2, cloud_platform=Azure, customer_id=A150, env_id=320296, env_name=A150 Ross POC, env_type=prod, instance=env-320296laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=preprod" t=2024-05-29T13:44:13.8976899Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.89768618Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.897664919Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=linkerd-proxy, namespace=elepay-api, pod=oneqr-connect-api-84bfdcc7c7-hl9k7" t=2024-05-29T13:44:13.897694937Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.scheduler user=624354 slug=truliooworkflow version=43 fingerprint=701fc67e0ab1efbc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.897522972Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.897125842s EvaluationString:}]" duration=9.550333ms + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=kube-rbac-proxy-main, namespace=monitoring, pod=kube-state-metrics-79d79b66cd-bvzd9" t=2024-05-29T13:44:13.897573868Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320270laio1use2, cloud_platform=AWS, customer_id=C580, env_id=320270, env_name=C580_TCS_Prod, env_type=prod, instance=env-320270laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.8975212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noteove9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897464468Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noteove9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897435947Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=kube-rbac-proxy, namespace=monitoring, pod=node-exporter-jqphl" t=2024-05-29T13:44:13.897439482Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z 
next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.897406222Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.897328739Z caller=ruler.go:522 msg="tenant is owned by this instance" user=767220 slug=fassmus groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noteove9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897364487Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320159laio1use1, cloud_platform=AWS, customer_id=C680, env_id=320159, env_name=C680 FIS UAT U11, env_type=qa, instance=env-320159laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.897269444Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noouxwks-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897186745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noouxwks-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897157065Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noouxwks-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.897126374Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.897086537Z caller=ruler.go:522 msg="tenant is owned by this instance" user=547939 slug=frei groups=0 + logger=ngalert.state.manager user=309009 slug=elestyle instance="container=identity, namespace=linkerd, pod=linkerd-identity-7db9fcdbcf-pp9vg" t=2024-05-29T13:44:13.897084221Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320133laio1use1, cloud_platform=AWS, customer_id=C522, env_id=320133, env_name=C522_Stuller_Dev, env_type=dev, instance=env-320133laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" 
t=2024-05-29T13:44:13.897072812Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.896880852Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320115laio1use1, cloud_platform=AWS, customer_id=C660, env_id=320115, env_name=C660 V.P. Dev, env_type=dev, instance=env-320115laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.896944417Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nokd05ni-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896904552Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320115laio1use1, cloud_platform=AWS, customer_id=C660, env_id=320115, env_name=C660 V.P. Dev, env_type=dev, instance=env-320115laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.896934845Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320109laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=320109, env_name=C559_SWIFT_QA_U10, env_type=prod, instance=env-320109laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.896836894Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nokd05ni-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896877252Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=160939 slug=jaisonmello instance="datasource_uid=x5ZT_m_nk, ref_id=B,D" t=2024-05-29T13:44:13.896835383Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=160939 slug=jaisonmello t=2024-05-29T13:44:13.896765845Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nod9u6wm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896598509Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nod9u6wm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896551778Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.896198095Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:13.89662688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=155740 slug=routific instance= t=2024-05-29T13:44:13.896635833Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=8f210de72488e4f2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.896507111Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C0:{Var:C Labels: Value:0xc024db5d60} C1:{Var:C Labels: Value:0xc024db5d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.89621303s EvaluationString:[ var='C0' metric='Value' labels={} value=13 ], [ var='C1' metric='Value' labels={} value=13 ]}]" duration=42.249781ms + level=debug ts=2024-05-29T13:44:13.896545444Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.896493314Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nod9u6wm-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896395527Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=802643 slug=eigenda instance="method=RetrieveBlob" t=2024-05-29T13:44:13.896335314Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=802643 slug=eigenda instance="method=RetrieveBlob" t=2024-05-29T13:44:13.896323007Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-320091laio1use1, cloud_platform=AWS, customer_id=C410, env_id=320091, env_name=C410_PROD, env_type=prod, instance=env-320091laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.896307026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=802643 slug=eigenda version=3 fingerprint=fc1db7dea362f71a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.896205856Z level=debug msg="Alert rule evaluated" results="[{Instance:method=RetrieveBlob State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:method=RetrieveBlob Value:0xc015875798} B:{Var:B Labels:method=RetrieveBlob Value:0xc0158756f8} C:{Var:C Labels:method=RetrieveBlob Value:0xc015875778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.895886653s EvaluationString:[ var='A' labels={method=RetrieveBlob} value=0 ], [ var='B' labels={method=RetrieveBlob} value=0 ], [ var='C' labels={method=RetrieveBlob} value=0 ]}]" 
duration=35.079608ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noc9iv8i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896090594Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noc9iv8i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896077183Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319992laio1use1, cloud_platform=AWS, customer_id=C547, env_id=319992, env_name=C547 DEV OBSA-Bluestem, env_type=dev, instance=env-319992laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.896195719Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-noc9iv8i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.896035563Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.896079601Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.896038744Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.896071096Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.896049256Z caller=remote_alert_sender.go:94 user=642786 slug=sophoscomnsg host=sophoscomnsg-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.44.231:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a6c90281-d9e9-459a-9436-1e18bc59fce5 alerts=1 + level=debug ts=2024-05-29T13:44:13.893963008Z caller=ruler.go:522 msg="tenant is owned by this instance" user=311224 slug=eni groups=31 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319956laio1euc1, cloud_platform=AWS, customer_id=C559, env_id=319956, env_name=C559_SWIFT_DEV_U10, env_type=prod, instance=env-319956laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.896016753Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.895974245Z caller=remote_alert_sender.go:94 user=642786 slug=sophoscomnsg host=sophoscomnsg-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.13.67:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a6c90281-d9e9-459a-9436-1e18bc59fce5 alerts=1 + 
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319839laio1use1, cloud_platform=AWS, customer_id=C313, env_id=319839, env_name=C313 DeRoyal PROD, env_type=prod, instance=env-319839laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.895889858Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-no5smchz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.895678989Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-no5smchz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.895642239Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:13.895533523Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=687903 slug=energetech
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-no2h6wxa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.895386416Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.892701996Z caller=ruler.go:522 msg="tenant is owned by this instance" user=687903 slug=energetech groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-no2h6wxa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.895244145Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.895388699Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319735laio1use1, cloud_platform=AWS, customer_id=C433, env_id=319735, env_name=C433 ABC Liquors PROD, env_type=prod, instance=env-319735laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.895406026Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-no1aapti-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.895168964Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-no1aapti-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.895139094Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nns192sg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89480876Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.893951308Z caller=ruler.go:522 msg="tenant is owned by this instance" user=614592 slug=exonet groups=0
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319693laio1use1, cloud_platform=AWS, customer_id=C345, env_id=319693, env_name=C345 Fox PROD, env_type=prod, instance=env-319693laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.895011557Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.894912767Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:13.89213379Z caller=client.go:80 msg="creating client for grafana instance" user=541295 addr=dns:///frikadell0-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nns192sg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89474133Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319688laio1use1, cloud_platform=AWS, customer_id=C558, env_id=319688, env_name=C558_Covanta_PROD_U10, env_type=prod, instance=env-319688laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.894754906Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnm64qwi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894701479Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319579laio1use1, cloud_platform=AWS, customer_id=C711, env_id=319579, env_name=C711 Disney CP Prod, env_type=prod, instance=env-319579laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.894552651Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:13.894330616Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.894372922Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319574laio1use1, cloud_platform=AWS, customer_id=C313, env_id=319574, env_name=C313 DeRoyal DEV, env_type=dev, instance=env-319574laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.894381271Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.894386613Z caller=remote_instance_store.go:51 user=237629 slug=ocrolus msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319574laio1use1, cloud_platform=AWS, customer_id=C313, env_id=319574, env_name=C313 DeRoyal DEV, env_type=dev, instance=env-319574laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.894348817Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnkyzq40-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894331775Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnjme0uo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894263725Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnjme0uo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894220814Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:13.89424471Z caller=client.go:80 msg="creating client for grafana instance" user=662913 addr=dns:///gealptsfprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5
retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnjme0uo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894164124Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.894091681Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnf9l9lx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894096933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnf9l9lx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894070253Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnf9l9lx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.894028212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnf9l9lx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893953911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnd1k40v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893911081Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.894001838Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.893922859Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.893895902Z 
caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:13.893842118Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info component=discovery ts=2024-05-29T13:44:13.893784906Z caller=client.go:80 msg="creating client for grafana instance" user=486818 addr=dns:///geaiottest-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.893713997Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=82372 slug=fout t=2024-05-29T13:44:13.893712926Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.89375194Z caller=remote_instance_store.go:51 user=82372 slug=fout msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnd1k40v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89376853Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnc6is78-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893711029Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=82372 slug=fout t=2024-05-29T13:44:13.893641561Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:13.893651255Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnc6is78-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893640658Z level=debug msg="Keeping state" state=Normal + Error parsing panelUID for alert annotationruleID449dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=82372 slug=fout version=5 fingerprint=6522fb8d792f2021 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.893558373Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.893242395s EvaluationString:}]" duration=383.794134ms + logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:13.893593744Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 
fingerprint=98282332acca0313 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.89354028Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=8.550429ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnc6is78-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893613268Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnc6is78-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893570578Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnc6is78-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893515587Z level=debug msg="Setting next state" handler=resultNormal + level=error ts=2024-05-29T13:44:13.893494623Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319426laio1euw2, cloud_platform=AWS, customer_id=C578, env_id=319426, env_name=C578_SLC_Dev_U10, env_type=dev, instance=env-319426laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:13.893541538Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=451223 slug=amadeuspfptest instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.893492905Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nnanr3dg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893449026Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=451223 slug=amadeuspfptest t=2024-05-29T13:44:13.893450327Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=451223 slug=amadeuspfptest version=28 fingerprint=ee7cc2883844b70f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.893372515Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] 
Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.893004198s EvaluationString:}]" duration=19.629994ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319425laio1euw2, cloud_platform=AWS, customer_id=C578, env_id=319425, env_name=C578_SLC_TEST_U10, env_type=test, instance=env-319425laio1euw2, job=integrations/node_exporter, region=eu-west-2, stage=live" t=2024-05-29T13:44:13.89333861Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.893370396Z caller=remote_instance_store.go:51 user=696538 slug=drakkarsoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:13.893302621Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.695399ms + level=debug ts=2024-05-29T13:44:13.893275801Z caller=ruler.go:522 msg="tenant is owned by this instance" user=569904 slug=ercluster groups=0 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319398laio1eastus, cloud_platform=Azure, customer_id=A140, env_id=319398, env_name=A140 OBE DEV, env_type=dev, instance=env-319398laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.893205456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319398laio1eastus, cloud_platform=Azure, customer_id=A140, env_id=319398, env_name=A140 OBE DEV, env_type=dev, instance=env-319398laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.89319153Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.8931762Z caller=client.go:80 msg="creating client for grafana instance" user=533098 addr=dns:///gdd-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nn9kn4tf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.893140763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nn9kn4tf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.892984272Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319184laio1use1, cloud_platform=AWS, customer_id=C486, env_id=319184, env_name=C486_Cox_Manheim_Dev, env_type=dev, instance=env-319184laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.89303418Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:13.892839975Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.893011899Z caller=client.go:80 msg="creating client for grafana instance" user=525810 addr=dns:///gategourmet-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.892840087Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.892908598Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=534518 slug=estest + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nn6nvbtg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.892945901Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.892877097Z caller=ruler.go:522 msg="tenant is owned by this instance" user=534518 slug=estest groups=0 + level=warn ts=2024-05-29T13:44:13.892872897Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=523582 slug=ejie + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.892796114Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.090941ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nmen40a2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.892734549Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.892633881Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-319022laio1use1, cloud_platform=AWS, customer_id=C577, env_id=319022, env_name=C577 CBrands QA, env_type=qa, instance=env-319022laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.892538147Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.892443417Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.892375693Z caller=ruler.go:522 msg="tenant is owned by this instance" user=713091 slug=expghohr groups=7 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nmcwhdx3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.892379605Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nmcwhdx3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.892277934Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.892318892Z caller=client.go:80 msg="creating client for grafana instance" user=741989 addr=dns:///fujiseal-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318931laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318931, env_name=C577 CBrands DEV, env_type=dev, instance=env-318931laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.89233287Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.892209391Z caller=client.go:80 msg="creating client for grafana instance" user=612569 addr=dns:///frontocean-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.892233493Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.892208253Z caller=remote_instance_store.go:51 user=686395 slug=containerfoundation msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.890613676Z caller=client.go:80 msg="creating client for grafana instance" user=625101 addr=dns:///finxone-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.89213699Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=618652 slug=facilitypro + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318919laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318919, env_name=C577 CBrands SANDBOX, env_type=sandbox, instance=env-318919laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.892190024Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.89210789Z caller=ruler.go:522 msg="tenant is owned by this instance" user=618652 slug=facilitypro groups=0 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318919laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318919, env_name=C577 CBrands SANDBOX, env_type=sandbox, instance=env-318919laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.892181757Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nmcoi6ki-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.892142213Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318834laio1use1, cloud_platform=AWS, customer_id=C577, env_id=318834, env_name=C577 CBrands EXTERNAL, env_type=other, instance=env-318834laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.892089527Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.891694386Z caller=ruler.go:522 msg="tenant is owned by this instance" user=550570 slug=ferrioprod groups=1 + level=debug ts=2024-05-29T13:44:13.891989372Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nmc14p7t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891951041Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nmc14p7t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891922451Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.891863788Z caller=client.go:80 msg="creating client for grafana instance" user=547939 addr=dns:///frei-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.891725887Z caller=client.go:80 msg="creating client for grafana instance" user=659863 addr=dns:///fracek-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.891810087Z caller=client.go:80 msg="creating client for grafana instance" user=495235 addr=dns:///frantchenco-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.89187907Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nma6lssm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.8918379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318655laio1euw1, cloud_platform=AWS, customer_id=C600, env_id=318655, env_name=C600_Celio_Dev, env_type=dev, instance=env-318655laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.89178374Z 
level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.891773687Z caller=ruler.go:522 msg="tenant is owned by this instance" user=402407 slug=euwest3test groups=1 + level=debug ts=2024-05-29T13:44:13.822848736Z caller=ruler.go:522 msg="tenant is owned by this instance" user=762763 slug=evialaz groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nm8favyz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891734749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nm8favyz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891712468Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.891665786Z caller=ruler.go:522 msg="tenant is owned by this instance" user=633855 slug=eyescreamxd groups=0 + level=warn ts=2024-05-29T13:44:13.891594585Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=716143 slug=eguanarocks + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318650laio1euw1, cloud_platform=AWS, customer_id=C600, env_id=318650, env_name=C600_Celio_Prod, env_type=prod, instance=env-318650laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live" t=2024-05-29T13:44:13.891603934Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.891508451Z caller=remote_instance_store.go:51 user=484167 slug=stabilityai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nm50x54a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891526167Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.891459607Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318538laio1usw2, cloud_platform=AWS, customer_id=C731, env_id=318538, env_name=C731 Accurate.com PROD, env_type=prod, instance=env-318538laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.891448887Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nm50x54a-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891472636Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318538laio1usw2, cloud_platform=AWS, customer_id=C731, env_id=318538, env_name=C731 Accurate.com PROD, env_type=prod, instance=env-318538laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=preprod" t=2024-05-29T13:44:13.891433828Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.891410791Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=484167 slug=stabilityai instance="persistentvolumeclaim=data-api-production-postgresql-readonly-1" t=2024-05-29T13:44:13.891370519Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nm45172u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891363875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=484167 slug=stabilityai instance="persistentvolumeclaim=data-api-production-postgresql-0" t=2024-05-29T13:44:13.891292266Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=484167 slug=stabilityai instance="persistentvolumeclaim=data-api-production-postgresql-0" t=2024-05-29T13:44:13.891275646Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=484167 slug=stabilityai t=2024-05-29T13:44:13.891216315Z level=debug msg="State manager processing evaluation results" resultCount=3 + level=debug ts=2024-05-29T13:44:13.891201839Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlzx3b9u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891235334Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=560104 slug=northwestnodes t=2024-05-29T13:44:13.891180879Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.826395ms + logger=ngalert.state.manager.persist user=326888 slug=buildingblocks t=2024-05-29T13:44:13.891026583Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.761218ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlzf3bt1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891081672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlzf3bt1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.891019511Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.89102348Z caller=ruler.go:522 msg="tenant is owned by this instance" user=731426 slug=emeacluster groups=7 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlzf3bt1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890990391Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlwf7q6z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.89093267Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.890656161Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlwf7q6z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890822089Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.890805578Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=617022 slug=couponsphinx + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318366laio1use1, cloud_platform=AWS, customer_id=C729, env_id=318366, env_name=C729_Bayer_AG_PROD, env_type=prod, instance=env-318366laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.890738481Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.890629185Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlngfgdu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890641547Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.890332473Z caller=ruler.go:522 msg="tenant is owned by this instance" user=556089 slug=easyconnectprod groups=1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318291laio1use1, cloud_platform=AWS, customer_id=C423, env_id=318291, env_name=C423 MMRIET DEV, env_type=dev, instance=env-318291laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.890530087Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318291laio1use1, cloud_platform=AWS, customer_id=C423, env_id=318291, env_name=C423 MMRIET DEV, env_type=dev, instance=env-318291laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.89051476Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nllzmeaf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890495906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nllzmeaf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890463426Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.890457575Z caller=client.go:80 msg="creating client for grafana instance" user=619273 addr=dns:///finector-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318286laio1use1, cloud_platform=AWS, customer_id=C487, env_id=318286, env_name=C487 Pfizer QA, env_type=qa, instance=env-318286laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.890339746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlk5jnaf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890355825Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-nlk5jnaf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890331034Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.890326473Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=621117 slug=edwardvoermans2 + level=debug ts=2024-05-29T13:44:13.822847035Z caller=ruler.go:522 msg="tenant is owned by this instance" user=621117 slug=edwardvoermans2 groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlk5jnaf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890299974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlk5jnaf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890249323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlgghzgf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890197103Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=384712 slug=nearinc t=2024-05-29T13:44:13.890183669Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.001265ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlgghzgf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.890108112Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.890053518Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.889980117Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318184laio1use1, cloud_platform=AWS, customer_id=C726, env_id=318184, env_name=C726 MiMedx DEV, env_type=dev, instance=env-318184laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.890002731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlb4ai1z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889845249Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=472647 slug=planet version=362 fingerprint=efd7978666ed2a95 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.889774571Z level=debug msg="Alert rule evaluated" results="[{Instance:metric.name=value_oldest_unacked_message_age_aggregate, resource.label.subscription_id=live-ordersv2-queued State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value_oldest_unacked_message_age_aggregate, resource.label.subscription_id=live-ordersv2-queued Value:0xc0331cc958} C:{Var:C Labels:metric.name=value_oldest_unacked_message_age_aggregate, resource.label.subscription_id=live-ordersv2-queued Value:0xc0331cc980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.889467248s EvaluationString:[ var='B' labels={metric.name=value_oldest_unacked_message_age_aggregate, resource.label.subscription_id=live-ordersv2-queued} value=1 ], [ var='C' labels={metric.name=value_oldest_unacked_message_age_aggregate, resource.label.subscription_id=live-ordersv2-queued} value=0 ]}]" duration=97.322389ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nlb4ai1z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889784909Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.889652987Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nl9q76pn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889618337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nl9etwa4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889535136Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.889505104Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-nl9etwa4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889481986Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.889458884Z caller=remote_instance_store.go:51 user=571981 slug=pancmc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.889406472Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.889375595Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318114laio1uksouth, cloud_platform=Azure, customer_id=A187, env_id=318114, env_name=A187_Admiral_Dev, env_type=dev, instance=env-318114laio1uksouth, job=integrations/node_exporter, region=uksouth, stage=live" t=2024-05-29T13:44:13.889398732Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318114laio1uksouth, cloud_platform=Azure, customer_id=A187, env_id=318114, env_name=A187_Admiral_Dev, env_type=dev, instance=env-318114laio1uksouth, job=integrations/node_exporter, region=uksouth, stage=live" t=2024-05-29T13:44:13.889381755Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.889365668Z caller=remote_image_capturer.go:33 user=571981 slug=pancmc rule_org_id=1 rule_uid=d9b41ccc-ae69-475f-9552-f89787827096 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nl8x5q93-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889203213Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nl8x5q93-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889170672Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.889167002Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nl7g01wd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.889010271Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nl7g01wd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88895524Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.888759477Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.888953803Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.888835388Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nl3a7qx6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.888821219Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.888683124Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.888641666Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nkoiwg2p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.888541386Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nkoiwg2p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.888512606Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.888410791Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="datasource_uid=KArZ6Sf4z, ref_id=Authentication Error" previous_handler=resultNoData t=2024-05-29T13:44:13.888331041Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="datasource_uid=KArZ6Sf4z, ref_id=Authentication Error" previous_handler=resultNoData t=2024-05-29T13:44:13.888321682Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="datasource_uid=KArZ6Sf4z, ref_id=Authentication Error" t=2024-05-29T13:44:13.888300355Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod t=2024-05-29T13:44:13.888233458Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nkjxybbi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.888258113Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nkjxybbi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.888230353Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=557231 slug=lnrsusinsuranceprod version=45 fingerprint=4e318fce7ad2de82 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.887929304Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=KArZ6Sf4z, ref_id=Authentication Error State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.887432596s EvaluationString:}]" duration=87.081143ms
+ level=debug ts=2024-05-29T13:44:13.888129659Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.888100775Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nkbcuvoa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.888118992Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318025laio1use2, cloud_platform=AWS, customer_id=C685, env_id=318025, env_name=C685_NTTA_SBOX, env_type=dev, instance=env-318025laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.888041872Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nkbcuvoa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.888050831Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.887873185Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ Error parsing panelUID for alert annotationruleID2621dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.887926616Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=48.67135ms
+ level=debug ts=2024-05-29T13:44:13.887691502Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-318019laio1use2, cloud_platform=AWS, customer_id=C685, env_id=318019, env_name=C685_NTTA_Dev/Test, env_type=dev, instance=env-318019laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.887695941Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nk99ukd8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887695247Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nk7xpvis-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887571236Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.887455895Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nk7xpvis-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887501715Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nk7xpvis-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887473495Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nk7xpvis-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887432055Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nk7xpvis-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887405114Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njzqi2ab-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887293673Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.887127097Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njyk95k7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.887152032Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njyk95k7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88700782Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317997laio1use1, cloud_platform=AWS, customer_id=C724, env_id=317997, env_name=C724 Travelers ES SBX, env_type=sandbox, instance=env-317997laio1use1, job=integrations/node_exporter, region=us-east-1, stage=preprod" t=2024-05-29T13:44:13.887042257Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njyk95k7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88698191Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=340750 slug=aptoslabs t=2024-05-29T13:44:13.886883825Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.400988ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317810laio1use1, cloud_platform=AWS, customer_id=C709, env_id=317810, env_name=C709_OpenDigital_U10_PROD, env_type=prod, instance=env-317810laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.886916914Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.886860406Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317810laio1use1, cloud_platform=AWS, customer_id=C709, env_id=317810, env_name=C709_OpenDigital_U10_PROD, env_type=prod, instance=env-317810laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.886903879Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njtu2bv4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886862599Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.886816639Z caller=remote_instance_store.go:51 user=777670 slug=fakku msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njtu2bv4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886679867Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.886636564Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317551laio1usw1, cloud_platform=AWS, customer_id=C713, env_id=317551, env_name=c713_CPK_dev, env_type=dev, instance=env-317551laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:13.886643393Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njs1d4cg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886601926Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317548laio1usw1, cloud_platform=AWS, customer_id=C713, env_id=317548, env_name=c713_CPK_prod, env_type=prod, instance=env-317548laio1usw1, job=integrations/node_exporter, region=us-west-1, stage=live" t=2024-05-29T13:44:13.886507707Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.886351065Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.886396584Z caller=remote_instance_store.go:51 user=173374 slug=felmo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.886328986Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=173374 slug=felmo t=2024-05-29T13:44:13.886287744Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njom12xe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886311153Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njom12xe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886285463Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njom12xe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886245612Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.886226185Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.886209976Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njno7peg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886173552Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.886120224Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njno7peg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.886063091Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.886007865Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njno7peg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88601826Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=196013 slug=inmediasoftware t=2024-05-29T13:44:13.885953159Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=196013 slug=inmediasoftware instance= t=2024-05-29T13:44:13.885942197Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=196013 slug=inmediasoftware instance= t=2024-05-29T13:44:13.88593307Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.885946619Z caller=remote_instance_store.go:51 user=877555 slug=cmbe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.885908997Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njdrh0au-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885920399Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=196013 slug=inmediasoftware version=104 fingerprint=34f997442d288f6e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.885818662Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.885478456s EvaluationString:}]" duration=32.922614ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njdrh0au-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885849848Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.885834915Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=877555 slug=cmbe version=30 fingerprint=e33407467833d240 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.885755407Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0350ebcd0} B:{Var:B Labels: Value:0xc0350ebc10} C:{Var:C Labels: Value:0xc0350ebc18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.885367817s EvaluationString:[ var='A' labels={} value=104 ], [ var='B' labels={} value=104 ], [ var='C' labels={} value=0 ]}]" duration=9.145294ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njdrh0au-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885776878Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317221laio1apn1, cloud_platform=AWS, customer_id=C703, env_id=317221, env_name=C703_sumitomo_DEV, env_type=dev, instance=env-317221laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:13.885786637Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.885700563Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njc4tuo4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885723917Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njc4tuo4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885656846Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.885692062Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-njc4tuo4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885629506Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.885558034Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.885512111Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nj36xc5j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885269502Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nj36xc5j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885236432Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nj36xc5j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.885085261Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=363785 slug=moonletmonitor instance= t=2024-05-29T13:44:13.884830133Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317150laio1use2, cloud_platform=AWS, customer_id=C717, env_id=317150, env_name=C717 R+L Carriers Prd, env_type=prod, instance=env-317150laio1use2, job=integrations/node_exporter, region=us-east-2, stage=live" t=2024-05-29T13:44:13.884860415Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nixmmisr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.884845158Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=363785 slug=moonletmonitor version=104 fingerprint=56d7e8dabcb24b96 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.884684722Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.884341259s EvaluationString:}]" duration=32.811444ms
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.884615431Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-317057laio1use1, cloud_platform=AWS, customer_id=C725, env_id=317057, env_name=C725 Arcos Dorados PROD, env_type=prod, instance=env-317057laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.884684883Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-niwv0020-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.884551125Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-niwv0020-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.884491964Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nisicrhc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.884409084Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-314050laio1northeurope, cloud_platform=Azure, customer_id=A199, env_id=314050, env_name=A199_Sonae_UAT, env_type=test, instance=env-314050laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:13.884331838Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nisicrhc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.884332093Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-314050laio1northeurope, cloud_platform=Azure, customer_id=A199, env_id=314050, env_name=A199_Sonae_UAT, env_type=test, instance=env-314050laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:13.884317234Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.884211603Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.884177785Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=800848 slug=flowfoundation t=2024-05-29T13:44:13.884143934Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.568452ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nim500fa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.884119491Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nim500fa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88407459Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-313006laio1westeurope, cloud_platform=Azure, customer_id=A198, env_id=313006, env_name=A198_GCO_Prod, env_type=prod, instance=env-313006laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.883986705Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nim500fa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.883908268Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.883897917Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-312813laio1usw2, cloud_platform=AWS, customer_id=C663, env_id=312813, env_name=C663 Franconnect Prod DR, env_type=prod, instance=env-312813laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:13.883799718Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nibffzn4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.883692076Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nia2q8w1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.883616995Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nia2q8w1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.883559915Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-312080laio1use1, cloud_platform=AWS, customer_id=C687, env_id=312080, env_name=C687 DirecTV LatAm Dev, env_type=dev, instance=env-312080laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.883473652Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-312080laio1use1, cloud_platform=AWS, customer_id=C687, env_id=312080, env_name=C687 DirecTV LatAm Dev, env_type=dev, instance=env-312080laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.883465923Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.883439795Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni9nfqkg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.883232742Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.883224051Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni9nfqkg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.883162151Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.883170707Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.883162799Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.883131249Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.882932733Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.883059462Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni61byei-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.883021329Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-311840laio1use1, cloud_platform=AWS, customer_id=C706, env_id=311840, env_name=C706 Cox DTDMS Prod, env_type=prod, instance=env-311840laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.882933338Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni2nkl38-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882867598Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni2nkl38-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882843268Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni2nkl38-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882811507Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni2nkl38-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882785067Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ni2nkl38-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882725976Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-311555laio1aps1, cloud_platform=AWS, customer_id=C034, env_id=311555, env_name=AAP-Test environment-AP, env_type=prod, instance=env-311555laio1aps1, job=integrations/node_exporter, region=ap-southeast-1, stage=testing" t=2024-05-29T13:44:13.882770875Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.882619812Z caller=remote_instance_store.go:51 user=87780 slug=zencloudandhosting msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhvlpp91-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882627555Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-311512laio1use1, cloud_platform=AWS, customer_id=C706, env_id=311512, env_name=C706 Cox DTDMS Dev, env_type=dev, instance=env-311512laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.882589665Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhvlpp91-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882554535Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=87780 slug=zencloudandhosting t=2024-05-29T13:44:13.882551599Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:13.882507108Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:50:10Z next_ends_at=2024-05-29T13:52:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhvlpp91-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882477214Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.882463128Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A" t=2024-05-29T13:44:13.882493734Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=87780 slug=zencloudandhosting version=1 fingerprint=46e62bab51f12983 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.882346181Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.882031339s EvaluationString:}]" duration=320.277629ms
+ level=debug ts=2024-05-29T13:44:13.882381322Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhu5193n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.882339362Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.882242809Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.882144809Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhtkv7xm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88212864Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-310499laio1usw2, cloud_platform=AWS, customer_id=C702, env_id=310499, env_name=C702 Cox Xtime UAT, env_type=qa, instance=env-310499laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:13.882053839Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.881951597Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.447218ms
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-309888laio1use1, cloud_platform=AWS, customer_id=C711, env_id=309888, env_name=C711 Disney CP Dev, env_type=dev, instance=env-309888laio1use1, job=integrations/node_exporter, region=us-east-1, stage=decommission" t=2024-05-29T13:44:13.881880778Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-309888laio1use1, cloud_platform=AWS, customer_id=C711, env_id=309888, env_name=C711 Disney CP Dev, env_type=dev, instance=env-309888laio1use1, job=integrations/node_exporter, region=us-east-1, stage=decommission" t=2024-05-29T13:44:13.881864263Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=251760 slug=forgerock t=2024-05-29T13:44:13.881881769Z level=warn msg="Tick dropped because alert rule evaluation is too slow" rule_uid=cdd7i0msl55hcf org_id=1 time=2024-05-29T13:43:10Z
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.881872187Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.859075ms
+ logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.881571452Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.881763449Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.531845ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhom4zmv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.881737086Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=507549 slug=coindcx t=2024-05-29T13:44:13.881634326Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=507549 slug=coindcx instance="datasource_uid=4DXtZk24z, ref_id=A" t=2024-05-29T13:44:13.881613556Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.881596213Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.881566567Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.881560097Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.881553427Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.881481077Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.881403646Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-308236laio1eastus, cloud_platform=Azure, customer_id=A105, env_id=308236, env_name=A105 Henry Schein PROD, env_type=prod, instance=env-308236laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.881497953Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-308236laio1eastus, cloud_platform=Azure, customer_id=A105, env_id=308236, env_name=A105 Henry Schein PROD, env_type=prod, instance=env-308236laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.881481647Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhiz7fk3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.881473864Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhinfpw2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.881422903Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nhinfpw2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.881349212Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-307900laio1eastus, cloud_platform=Azure, customer_id=A105, env_id=307900, env_name=A105 Henry Schein DEV, env_type=dev, instance=env-307900laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.881304099Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nh8mnul9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.881258961Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=893151 slug=cmtdsnp t=2024-05-29T13:44:13.881186481Z level=debug msg="Saving alert states" count=105 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.88110161Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nh247cnx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88110945Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.881095254Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds-devops, pod=exo-devca-cicd-288-zcl2b-9ws4z-nzgt7" t=2024-05-29T13:44:13.881104789Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.881012717Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngxrdz2x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880912798Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-307641laio1usw2, cloud_platform=AWS, customer_id=C702, env_id=307641, env_name=C702 Cox Xtime DEV, env_type=dev, instance=env-307641laio1usw2, job=integrations/node_exporter, region=us-west-2, stage=live" t=2024-05-29T13:44:13.880974631Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.880811354Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngxrdz2x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880901258Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds-devops, pod=argocd-notifications-controller-64bb8dcf46-trlct" t=2024-05-29T13:44:13.88071831Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds-devops, pod=argocd-applicationset-controller-5877955b59-h8bhh" t=2024-05-29T13:44:13.880659339Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-307360laio1cac1, cloud_platform=AWS, customer_id=C701, env_id=307360, env_name=C701 RONA DEV, env_type=dev, instance=env-307360laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.880794265Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngxl9dli-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880662485Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-qaus-jenkins-wc-6f46db6b5b-nw9d2" t=2024-05-29T13:44:13.880489875Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.880679176Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-qa-jenkins-wcsv-6797fc675c-8ww9r" t=2024-05-29T13:44:13.880396103Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngueulzu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880571864Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.880603628Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngueulzu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880550424Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-preprod-vault-consul-758ff7bfdd-8dzmg" t=2024-05-29T13:44:13.880355081Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-307359laio1cac1, cloud_platform=AWS, customer_id=C701, env_id=307359, env_name=C701 RONA PROD, env_type=prod, instance=env-307359laio1cac1, job=integrations/node_exporter, region=ca-central-1, stage=live" t=2024-05-29T13:44:13.880616756Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-preprod-jenkins-844b87b597-dr7p5" t=2024-05-29T13:44:13.880341632Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.880569338Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:13.880424513Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=87.671281ms
+ level=debug ts=2024-05-29T13:44:13.880408396Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngueulzu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880499134Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-preprod-git-git-7b5b648548-spgjg" t=2024-05-29T13:44:13.880323991Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-devus-vault-con-74f6c575b8-6d879" t=2024-05-29T13:44:13.880310091Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngo0xoyx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880412823Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-307063laio1use1, cloud_platform=AWS, customer_id=C507, env_type=dev, instance=env-307063laio1use1, job=integrations/node_exporter, region=us-east-1" t=2024-05-29T13:44:13.880414572Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-devus-jenkins-w-6c6cb984d8-qrpm7" t=2024-05-29T13:44:13.880283851Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-307063laio1use1, cloud_platform=AWS, customer_id=C507, env_type=dev, instance=env-307063laio1use1, job=integrations/node_exporter, region=us-east-1" t=2024-05-29T13:44:13.880394711Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.88032684Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngo0xoyx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880358102Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-devus-git-gitea-5d9bbcc688-x4sv9" t=2024-05-29T13:44:13.880244199Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=55491 slug=demandbase t=2024-05-29T13:44:13.880304199Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=55491 slug=demandbase instance="datasource_uid=000000384, ref_id=A" t=2024-05-29T13:44:13.880256932Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngfezkhe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88019628Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngfezkhe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.88016742Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=wcs9-tds-dev-git-gitea-6d9474cb7-4dg5m" t=2024-05-29T13:44:13.880140276Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqauslivets-utils-744f56f4b5-r4s5z" t=2024-05-29T13:44:13.880103507Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqauslivets-app-75884f986d-m7g26" t=2024-05-29T13:44:13.880084786Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ngfezkhe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.880031859Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqauslivets-app-75884f986d-hbnzs"
t=2024-05-29T13:44:13.880058646Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqauslivesearch-app-slave-5558869975-m6fb5" t=2024-05-29T13:44:13.880035505Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqauslivesearch-app-repeater-5d5fdc8d98-bphrx" t=2024-05-29T13:44:13.880025114Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqausauthts-web-5877cd8545-zccdj" t=2024-05-29T13:44:13.879987294Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng5sqbcb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879996468Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng5sqbcb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879974798Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng5sqbcb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879943678Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng5sqbcb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879921368Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqausauthts-app-989d79dbb-lwc9p" t=2024-05-29T13:44:13.879925752Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng5sqbcb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879869417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, 
pod=tdsqausauthtdbank-video-67bc7bdcbd-4nl4b" t=2024-05-29T13:44:13.879902372Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng4u3pk4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879828737Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng4u3pk4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879801126Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng4u3pk4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879779706Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqausauthsearch-app-master-6cdf9cbffc-bgsnv" t=2024-05-29T13:44:13.8798775Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqalivets-utils-75b748978f-r2vkj" t=2024-05-29T13:44:13.87980504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqalivets-utils-75b748978f-r2vkj" t=2024-05-29T13:44:13.879795829Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqalivesearch-app-slave-557c7fb9b4-8j777" t=2024-05-29T13:44:13.879754319Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqalivesearch-app-repeater-665b664b99-lk8ws" t=2024-05-29T13:44:13.879743307Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng4e6f88-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879675155Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqaauthsearch-app-master-77dbc97966-jhrfv" t=2024-05-29T13:44:13.879603955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, 
pod=tdsqaauthcrs-app-59b767688b-zjtfs" t=2024-05-29T13:44:13.879594424Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsqaauthcrs-app-59b767688b-fclrn" t=2024-05-29T13:44:13.879564724Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ng4e6f88-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879567924Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.879577454Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.879543083Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-c59n4" t=2024-05-29T13:44:13.879442371Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-305714laio1uaenorth, cloud_platform=Azure, customer_id=A173, env_id=305714, env_name=A173_ADIA_Dev, env_type=dev, instance=env-305714laio1uaenorth, job=integrations/node_exporter, region=UAENorth, stage=live" t=2024-05-29T13:44:13.879468598Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.879461707Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-4nw98" t=2024-05-29T13:44:13.87942817Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-2gw5v" t=2024-05-29T13:44:13.87941749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivets-app-5b7ff985b6-2gw5v" t=2024-05-29T13:44:13.87941057Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivesearch-app-repeater-7dc846dd7b-qtc64" t=2024-05-29T13:44:13.879353159Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=560104 slug=northwestnodes t=2024-05-29T13:44:13.879352384Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=560104 slug=northwestnodes instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.879336684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm" t=2024-05-29T13:44:13.879335328Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-r67pm" t=2024-05-29T13:44:13.879331549Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:13.879287947Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=560104 slug=northwestnodes t=2024-05-29T13:44:13.879297783Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodlivecrs-app-b9c454b74-c9gzg" t=2024-05-29T13:44:13.879312818Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nfx014sv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879303971Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodauthtdbank-video-68f7fcd765-phq4c" t=2024-05-29T13:44:13.879223187Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nfoz5qjk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.87922107Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdspreprodauthcrs-app-76b8477c-sphlt" t=2024-05-29T13:44:13.879139134Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nfoz5qjk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.87913444Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevuslivesearch-app-slave-5955c58b9c-j6bqw" t=2024-05-29T13:44:13.879053292Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nfd0ptyz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879049749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nfd0ptyz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.879027918Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nfd0ptyz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878947548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauthts-web-8469cddd9-q7cjf" t=2024-05-29T13:44:13.87897633Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauthts-web-8469cddd9-q7cjf" t=2024-05-29T13:44:13.87896408Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauthts-utils-6c77d5f7d5-c2n24" t=2024-05-29T13:44:13.878929609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauthts-utils-6c77d5f7d5-c2n24" t=2024-05-29T13:44:13.878919679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauthtdbank-video-8667db7c76-mptrn" t=2024-05-29T13:44:13.878866828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-304804laio1westeurope, cloud_platform=Azure, customer_id=A161, env_id=304804, env_name=A161_ThyssenKrupp_Dev, env_type=dev, instance=env-304804laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.878831983Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-w8mlk" t=2024-05-29T13:44:13.878800526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauthcrs-app-778c79f6f6-5l6mh" t=2024-05-29T13:44:13.878762065Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevusauth-exo-mesh-669cd5c69d-nxcdx" t=2024-05-29T13:44:13.878728435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-304381laio1eastus, cloud_platform=Azure, customer_id=A190, env_id=304381, env_name=A190 PwC DEV, env_type=dev, instance=env-304381laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.878662341Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nf0t13dh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878649425Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-304381laio1eastus, cloud_platform=Azure, customer_id=A190, env_id=304381, env_name=A190 PwC DEV, env_type=dev, instance=env-304381laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.878609221Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nf0t13dh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878594804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevlivecrs-app-55f8998d6c-qdgqv" t=2024-05-29T13:44:13.878551631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevlive-cdap-sandbox-deployment-855b79f56b-2ll62" t=2024-05-29T13:44:13.87852206Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.87849769Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nezuxwcj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878467603Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-304380laio1eastus, cloud_platform=Azure, customer_id=A190, env_id=304380, env_name=A190 PwC Stage (UAT), env_type=qa, instance=env-304380laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.878449071Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nezuxwcj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878418022Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevcaauth-strapi-5b6b5f4844-8tvfk" t=2024-05-29T13:44:13.878432368Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=146728 slug=dgc t=2024-05-29T13:44:13.87838126Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=148.590575ms + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= 
t=2024-05-29T13:44:13.878421407Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=523054 slug=vialtopartners instance= t=2024-05-29T13:44:13.87841216Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ne3i7hbn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878324431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-vjfzl" t=2024-05-29T13:44:13.878285425Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ne3i7hbn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878283721Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.878303402Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevcaauth-exo-frontend-5c569cbc88-fr7t4" t=2024-05-29T13:44:13.878265994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf" t=2024-05-29T13:44:13.878235784Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-mpmqf" t=2024-05-29T13:44:13.878223083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj" t=2024-05-29T13:44:13.878199813Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevcaauth-exo-frontend-57bb64c945-f4bwj" t=2024-05-29T13:44:13.878190163Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.878150473Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevauthts-web-84684dcc89-64scg" t=2024-05-29T13:44:13.878160592Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.878116192Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndxn29rm-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.878092069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevauthts-utils-7f54f8d7b4-njddr" t=2024-05-29T13:44:13.87808178Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevauthts-app-5b5b5d5465-wqvh7" t=2024-05-29T13:44:13.878023059Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevauthtdbank-video-6f6d998b7f-5cmcx" t=2024-05-29T13:44:13.877993538Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevauthtdbank-video-6f6d998b7f-5cmcx" t=2024-05-29T13:44:13.877982168Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=893151 slug=cmtdsnp instance="namespace=tds, pod=tdsdevauthsearch-app-master-65969fb8d5-c7nl4" t=2024-05-29T13:44:13.877964298Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndiz59yk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877945667Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndiz59yk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877913267Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=539031 slug=chathamtechprd t=2024-05-29T13:44:13.877881158Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.934459ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndiz59yk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877890337Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndhxq3ok-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877862416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndhxq3ok-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877811176Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-303873laio1apn1, cloud_platform=AWS, customer_id=C689, env_id=303873, env_name=C689_Joshin-Denki_DEV, env_type=dev, instance=env-303873laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:13.877849632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndhxq3ok-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877788896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-303873laio1apn1, cloud_platform=AWS, customer_id=C689, env_id=303873, env_name=C689_Joshin-Denki_DEV, env_type=dev, instance=env-303873laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=live" t=2024-05-29T13:44:13.877831313Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndh2n5pw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877720605Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndh2n5pw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877667744Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=767797 slug=mgmresorts version=34 fingerprint=76960b6911ad9654 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.87741505Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.876989517s EvaluationString:}]" duration=741.575111ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndbr0vcf-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.87727021Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-300929laio1euc1, cloud_platform=AWS, customer_id=C501, env_id=300929, env_name=C501_Booking_PROD_2021_u7, env_type=prod, instance=env-300929laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.877254059Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-300929laio1euc1, cloud_platform=AWS, customer_id=C501, env_id=300929, env_name=C501_Booking_PROD_2021_u7, env_type=prod, instance=env-300929laio1euc1, job=integrations/node_exporter, region=eu-central-1, stage=live" t=2024-05-29T13:44:13.877240216Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndaufqc4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877174459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndaufqc4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877131929Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ndaufqc4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.877111919Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=686395 slug=containerfoundation instance="__name__=cf_ingress_metallb_up, cf_oncall_tier=none, cf_support_tier=standard, cluster=cluster-t15, grafanacloud=true, prometheus=cf-observability/prometheus, prometheus_replica=prometheus-prometheus-0" t=2024-05-29T13:44:13.877097831Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.876979374Z caller=remote_alert_sender.go:94 user=337951 slug=pawapay host=pawapay-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.61.196:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a2c822b1 alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nd3taw6d-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876944967Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=337951 slug=pawapay t=2024-05-29T13:44:13.876766055Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.327988ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-nczu85ek-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876864226Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-300468laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=300468, env_name=A157 Ryder Prod, env_type=prod, instance=env-300468laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:13.876874606Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-300468laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=300468, env_name=A157 Ryder Prod, env_type=prod, instance=env-300468laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:13.87685647Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ncvq1qdm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876759115Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=686395 slug=containerfoundation instance="__name__=cf_ingress_metallb_up, cf_oncall_tier=none, cf_support_tier=standard, cluster=cluster-t09, grafanacloud=true, prometheus=cf-observability/prometheus, prometheus_replica=prometheus-prometheus-0" t=2024-05-29T13:44:13.876762094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=548166 slug=mapcom1 t=2024-05-29T13:44:13.876671276Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=43.294953ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-299401laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=299401, env_name=A176 TWG Dev, env_type=dev, instance=env-299401laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live" t=2024-05-29T13:44:13.876707534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-299401laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=299401, env_name=A176 TWG Dev, env_type=dev, instance=env-299401laio1australiaeast, job=integrations/node_exporter, 
region=australiaeast, stage=live" t=2024-05-29T13:44:13.876691874Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ncuy62dv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876598753Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=686395 slug=containerfoundation instance="__name__=cf_ingress_metallb_up, cf_oncall_tier=none, cf_support_tier=standard, cluster=cluster-t05, grafanacloud=true, prometheus=cf-observability/prometheus, prometheus_replica=prometheus-prometheus-0" t=2024-05-29T13:44:13.87655247Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ncq3ydpb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876404401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ncq3ydpb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876373441Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=686395 slug=containerfoundation instance="__name__=cf_ingress_metallb_up, cf_oncall_tier=none, cf_support_tier=standard, cluster=cluster-t03, grafanacloud=true, prometheus=cf-observability/prometheus, prometheus_replica=prometheus-prometheus-0" t=2024-05-29T13:44:13.876406917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=686395 slug=containerfoundation instance="__name__=cf_ingress_metallb_up, cf_oncall_tier=none, cf_support_tier=standard, cluster=cluster-t02, grafanacloud=true, prometheus=cf-observability/prometheus, prometheus_replica=prometheus-prometheus-0" t=2024-05-29T13:44:13.876344016Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=686395 slug=containerfoundation instance="__name__=cf_ingress_metallb_up, cf_oncall_tier=none, cf_support_tier=standard, cluster=cluster-ist, grafanacloud=true, prometheus=cf-observability/prometheus, prometheus_replica=prometheus-prometheus-0" t=2024-05-29T13:44:13.876258824Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ncl0o550-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.87624013Z 
level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.876232898Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-298570laio1eastus2, cloud_platform=Azure, customer_id=A119, env_id=298570, env_name=A119_UAT, env_type=dev, instance=env-298570laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:13.87617059Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=384712 slug=nearinc instance="datasource_uid=7FBWubNVz, ref_id=A" t=2024-05-29T13:44:13.876167358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=384712 slug=nearinc instance="datasource_uid=7FBWubNVz, ref_id=A" t=2024-05-29T13:44:13.876156595Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.876105877Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ncl0o550-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876138869Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=384712 slug=nearinc instance="datasource_uid=7FBWubNVz, ref_id=A" t=2024-05-29T13:44:13.87613964Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.876107477Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ncl0o550-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.876105678Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.875993969Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:13.876010294Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.173895ms + level=debug ts=2024-05-29T13:44:13.876029746Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=686395 slug=containerfoundation instance="__name__=cf_ingress_metallb_up, cf_oncall_tier=none, cf_support_tier=expert, cluster=cluster-p13, grafanacloud=true, prometheus=cf-observability/prometheus, prometheus_replica=prometheus-prometheus-0" t=2024-05-29T13:44:13.875984389Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.875955664Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=bsc, remote=polygon Value:0xc0212473e8} B:{Var:B Labels:origin=bsc, remote=polygon Value:0xc0212473a0} C:{Var:C Labels:origin=bsc, remote=polygon Value:0xc0212473c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866179104s EvaluationString:[ var='A' labels={origin=bsc, remote=polygon} value=0 ], [ var='B' labels={origin=bsc, remote=polygon} value=0 ], [ var='C' labels={origin=bsc, remote=polygon} value=0 ]} {Instance:origin=bsc, remote=solana State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=bsc, remote=solana Value:0xc021247450} B:{Var:B Labels:origin=bsc, remote=solana Value:0xc021247490} C:{Var:C Labels:origin=bsc, remote=solana Value:0xc021247430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866184784s EvaluationString:[ var='A' labels={origin=bsc, remote=solana} value=0 ], [ var='B' labels={origin=bsc, remote=solana} value=0 ], [ var='C' labels={origin=bsc, remote=solana} value=0 ]} {Instance:origin=celo, remote=arbitrum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=arbitrum Value:0xc0212475f0} B:{Var:B Labels:origin=celo, remote=arbitrum Value:0xc021247618} C:{Var:C Labels:origin=celo, remote=arbitrum Value:0xc021247640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866191984s EvaluationString:[ var='A' labels={origin=celo, remote=arbitrum} value=0 ], [ var='B' labels={origin=celo, remote=arbitrum} value=0 ], [ var='C' labels={origin=celo, remote=arbitrum} value=0 ]} {Instance:origin=celo, remote=avalanche State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=avalanche Value:0xc0212476d8} B:{Var:B Labels:origin=celo, remote=avalanche Value:0xc021247fd0} C:{Var:C Labels:origin=celo, remote=avalanche Value:0xc0212476b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866199348s EvaluationString:[ var='A' labels={origin=celo, remote=avalanche} value=0 ], [ var='B' labels={origin=celo, remote=avalanche} value=0 ], [ var='C' labels={origin=celo, remote=avalanche} value=0 ]} {Instance:origin=celo, remote=bsc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=bsc Value:0xc014f541c0} B:{Var:B Labels:origin=celo, remote=bsc Value:0xc014f541f0} C:{Var:C Labels:origin=celo, remote=bsc Value:0xc014f54210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.86620908s EvaluationString:[ var='A' labels={origin=celo, remote=bsc} value=0 ], [ var='B' labels={origin=celo, remote=bsc} value=0 ], [ var='C' labels={origin=celo, remote=bsc} value=0 ]} {Instance:origin=celo, remote=ethereum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=ethereum Value:0xc014f543a0} B:{Var:B Labels:origin=celo, remote=ethereum Value:0xc014f543c8} C:{Var:C Labels:origin=celo, remote=ethereum Value:0xc014f54268}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866216526s EvaluationString:[ var='A' labels={origin=celo, remote=ethereum} value=0 ], [ var='B' labels={origin=celo, remote=ethereum} value=0 ], [ var='C' labels={origin=celo, remote=ethereum} value=0 ]} {Instance:origin=celo, remote=gnosis State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=gnosis Value:0xc014f54680} B:{Var:B Labels:origin=celo, remote=gnosis Value:0xc014f54540} C:{Var:C Labels:origin=celo, remote=gnosis Value:0xc014f54560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866223598s EvaluationString:[ 
var='A' labels={origin=celo, remote=gnosis} value=0 ], [ var='B' labels={origin=celo, remote=gnosis} value=0 ], [ var='C' labels={origin=celo, remote=gnosis} value=0 ]} {Instance:origin=celo, remote=moonbeam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=moonbeam Value:0xc014f54d98} B:{Var:B Labels:origin=celo, remote=moonbeam Value:0xc014f54dd0} C:{Var:C Labels:origin=celo, remote=moonbeam Value:0xc014f546e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866231288s EvaluationString:[ var='A' labels={origin=celo, remote=moonbeam} value=0 ], [ var='B' labels={origin=celo, remote=moonbeam} value=0 ], [ var='C' labels={origin=celo, remote=moonbeam} value=0 ]} {Instance:origin=celo, remote=optimism State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=optimism Value:0xc014f54fb0} B:{Var:B Labels:origin=celo, remote=optimism Value:0xc014f54fe8} C:{Var:C Labels:origin=celo, remote=optimism Value:0xc014f55320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866239007s EvaluationString:[ var='A' labels={origin=celo, remote=optimism} value=0 ], [ var='B' labels={origin=celo, remote=optimism} value=0 ], [ var='C' labels={origin=celo, remote=optimism} value=0 ]} {Instance:origin=celo, remote=polygon State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=polygon Value:0xc014f55800} B:{Var:B Labels:origin=celo, remote=polygon Value:0xc014f55848} C:{Var:C Labels:origin=celo, remote=polygon Value:0xc014f55870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866253634s EvaluationString:[ var='A' labels={origin=celo, remote=polygon} value=0 ], [ var='B' labels={origin=celo, remote=polygon} value=0 ], [ var='C' labels={origin=celo, remote=polygon} value=0 ]} {Instance:origin=celo, remote=solana State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=celo, remote=solana Value:0xc014f55e90} B:{Var:B Labels:origin=celo, remote=solana Value:0xc0193ea000} C:{Var:C Labels:origin=celo, remote=solana Value:0xc0193ea020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866261212s EvaluationString:[ var='A' labels={origin=celo, remote=solana} value=0 ], [ var='B' labels={origin=celo, remote=solana} value=0 ], [ var='C' labels={origin=celo, remote=solana} value=0 ]} {Instance:origin=ethereum, remote=arbitrum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=arbitrum Value:0xc0193ea0c0} B:{Var:B Labels:origin=ethereum, remote=arbitrum Value:0xc0193ea0e8} C:{Var:C Labels:origin=ethereum, remote=arbitrum Value:0xc0193ea110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866268152s EvaluationString:[ var='A' labels={origin=ethereum, remote=arbitrum} value=0 ], [ var='B' labels={origin=ethereum, remote=arbitrum} value=0 ], [ var='C' labels={origin=ethereum, remote=arbitrum} value=0 ]} {Instance:origin=ethereum, remote=avalanche State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=avalanche Value:0xc0193ea170} B:{Var:B Labels:origin=ethereum, remote=avalanche Value:0xc0193ea198} C:{Var:C Labels:origin=ethereum, remote=avalanche Value:0xc0193ea1c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.86627707s EvaluationString:[ var='A' labels={origin=ethereum, remote=avalanche} value=0 ], [ var='B' labels={origin=ethereum, remote=avalanche} value=0 ], [ var='C' labels={origin=ethereum, remote=avalanche} value=0 ]} {Instance:origin=ethereum, remote=bsc State:Normal 
Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=bsc Value:0xc0193ea318} B:{Var:B Labels:origin=ethereum, remote=bsc Value:0xc0193ea218} C:{Var:C Labels:origin=ethereum, remote=bsc Value:0xc0193ea2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866286725s EvaluationString:[ var='A' labels={origin=ethereum, remote=bsc} value=0 ], [ var='B' labels={origin=ethereum, remote=bsc} value=0 ], [ var='C' labels={origin=ethereum, remote=bsc} value=0 ]} {Instance:origin=ethereum, remote=celo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=celo Value:0xc0193ea378} B:{Var:B Labels:origin=ethereum, remote=celo Value:0xc0193ea3e0} C:{Var:C Labels:origin=ethereum, remote=celo Value:0xc0193ea408}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866295261s EvaluationString:[ var='A' labels={origin=ethereum, remote=celo} value=0 ], [ var='B' labels={origin=ethereum, remote=celo} value=0 ], [ var='C' labels={origin=ethereum, remote=celo} value=0 ]} {Instance:origin=ethereum, remote=gnosis State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=gnosis Value:0xc0193ea618} B:{Var:B Labels:origin=ethereum, remote=gnosis Value:0xc0193ea650} C:{Var:C Labels:origin=ethereum, remote=gnosis Value:0xc0193ea688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866302912s EvaluationString:[ var='A' labels={origin=ethereum, remote=gnosis} value=0 ], [ var='B' labels={origin=ethereum, remote=gnosis} value=0 ], [ var='C' labels={origin=ethereum, remote=gnosis} value=0 ]} {Instance:origin=ethereum, remote=moonbeam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=moonbeam Value:0xc0193ea7f0} B:{Var:B Labels:origin=ethereum, remote=moonbeam Value:0xc0193ea828} C:{Var:C Labels:origin=ethereum, remote=moonbeam Value:0xc0193ea7c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866310688s EvaluationString:[ var='A' labels={origin=ethereum, remote=moonbeam} value=0 ], [ var='B' labels={origin=ethereum, remote=moonbeam} value=0 ], [ var='C' labels={origin=ethereum, remote=moonbeam} value=0 ]} {Instance:origin=ethereum, remote=optimism State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=optimism Value:0xc0193ea888} B:{Var:B Labels:origin=ethereum, remote=optimism Value:0xc0193ea8b0} C:{Var:C Labels:origin=ethereum, remote=optimism Value:0xc0193eab98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866318399s EvaluationString:[ var='A' labels={origin=ethereum, remote=optimism} value=0 ], [ var='B' labels={origin=ethereum, remote=optimism} value=0 ], [ var='C' labels={origin=ethereum, remote=optimism} value=0 ]} {Instance:origin=ethereum, remote=polygon State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=polygon Value:0xc0193eac88} B:{Var:B Labels:origin=ethereum, remote=polygon Value:0xc0193eacb0} C:{Var:C Labels:origin=ethereum, remote=polygon Value:0xc0193eace8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866331142s EvaluationString:[ var='A' labels={origin=ethereum, remote=polygon} value=0 ], [ var='B' labels={origin=ethereum, remote=polygon} value=0 ], [ var='C' labels={origin=ethereum, remote=polygon} value=0 ]} {Instance:origin=ethereum, remote=solana State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=ethereum, remote=solana Value:0xc0193eae38} B:{Var:B Labels:origin=ethereum, remote=solana Value:0xc0193ead38} C:{Var:C 
Labels:origin=ethereum, remote=solana Value:0xc0193eadc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866338472s EvaluationString:[ var='A' labels={origin=ethereum, remote=solana} value=0 ], [ var='B' labels={origin=ethereum, remote=solana} value=0 ], [ var='C' labels={origin=ethereum, remote=solana} value=0 ]} {Instance:origin=gnosis, remote=arbitrum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=arbitrum Value:0xc0193eaf70} B:{Var:B Labels:origin=gnosis, remote=arbitrum Value:0xc0193eb038} C:{Var:C Labels:origin=gnosis, remote=arbitrum Value:0xc0193eaee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866344803s EvaluationString:[ var='A' labels={origin=gnosis, remote=arbitrum} value=0 ], [ var='B' labels={origin=gnosis, remote=arbitrum} value=0 ], [ var='C' labels={origin=gnosis, remote=arbitrum} value=0 ]} {Instance:origin=gnosis, remote=avalanche State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=avalanche Value:0xc0193eb118} B:{Var:B Labels:origin=gnosis, remote=avalanche Value:0xc0193eb140} C:{Var:C Labels:origin=gnosis, remote=avalanche Value:0xc0193eb0a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866349498s EvaluationString:[ var='A' labels={origin=gnosis, remote=avalanche} value=0 ], [ var='B' labels={origin=gnosis, remote=avalanche} value=0 ], [ var='C' labels={origin=gnosis, remote=avalanche} value=0 ]} {Instance:origin=gnosis, remote=bsc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=bsc Value:0xc0193eb2a0} B:{Var:B Labels:origin=gnosis, remote=bsc Value:0xc0193eb2c0} C:{Var:C Labels:origin=gnosis, remote=bsc Value:0xc0193eb270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866354232s EvaluationString:[ var='A' labels={origin=gnosis, remote=bsc} value=0 ], [ var='B' labels={origin=gnosis, remote=bsc} value=0 ], [ var='C' labels={origin=gnosis, remote=bsc} value=0 ]} {Instance:origin=gnosis, remote=celo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=celo Value:0xc0193eb3a0} B:{Var:B Labels:origin=gnosis, remote=celo Value:0xc0193eb3f0} C:{Var:C Labels:origin=gnosis, remote=celo Value:0xc0193eb410}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866360855s EvaluationString:[ var='A' labels={origin=gnosis, remote=celo} value=0 ], [ var='B' labels={origin=gnosis, remote=celo} value=0 ], [ var='C' labels={origin=gnosis, remote=celo} value=0 ]} {Instance:origin=gnosis, remote=ethereum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=ethereum Value:0xc0193eb488} B:{Var:B Labels:origin=gnosis, remote=ethereum Value:0xc0193eb5f0} C:{Var:C Labels:origin=gnosis, remote=ethereum Value:0xc0193eb460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866366163s EvaluationString:[ var='A' labels={origin=gnosis, remote=ethereum} value=0 ], [ var='B' labels={origin=gnosis, remote=ethereum} value=0 ], [ var='C' labels={origin=gnosis, remote=ethereum} value=0 ]} {Instance:origin=gnosis, remote=moonbeam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=moonbeam Value:0xc0193eb750} B:{Var:B Labels:origin=gnosis, remote=moonbeam Value:0xc0193eb660} C:{Var:C Labels:origin=gnosis, remote=moonbeam Value:0xc0193eb728}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866371245s EvaluationString:[ var='A' labels={origin=gnosis, remote=moonbeam} value=0 ], [ var='B' 
labels={origin=gnosis, remote=moonbeam} value=0 ], [ var='C' labels={origin=gnosis, remote=moonbeam} value=0 ]} {Instance:origin=gnosis, remote=optimism State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=optimism Value:0xc0193eb800} B:{Var:B Labels:origin=gnosis, remote=optimism Value:0xc0193eb838} C:{Var:C Labels:origin=gnosis, remote=optimism Value:0xc0193eb860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866376892s EvaluationString:[ var='A' labels={origin=gnosis, remote=optimism} value=0 ], [ var='B' labels={origin=gnosis, remote=optimism} value=0 ], [ var='C' labels={origin=gnosis, remote=optimism} value=0 ]} {Instance:origin=gnosis, remote=polygon State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=polygon Value:0xc0193ebac8} B:{Var:B Labels:origin=gnosis, remote=polygon Value:0xc0193ebbe0} C:{Var:C Labels:origin=gnosis, remote=polygon Value:0xc0193ebaa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866382548s EvaluationString:[ var='A' labels={origin=gnosis, remote=polygon} value=0 ], [ var='B' labels={origin=gnosis, remote=polygon} value=0 ], [ var='C' labels={origin=gnosis, remote=polygon} value=0 ]} {Instance:origin=gnosis, remote=solana State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=gnosis, remote=solana Value:0xc0193ebd40} B:{Var:B Labels:origin=gnosis, remote=solana Value:0xc0193ebc40} C:{Var:C Labels:origin=gnosis, remote=solana Value:0xc0193ebc78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866389908s EvaluationString:[ var='A' labels={origin=gnosis, remote=solana} value=0 ], [ var='B' labels={origin=gnosis, remote=solana} value=0 ], [ var='C' labels={origin=gnosis, remote=solana} value=0 ]} {Instance:origin=moonbeam, remote=arbitrum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=arbitrum Value:0xc0193ebf38} B:{Var:B Labels:origin=moonbeam, remote=arbitrum Value:0xc0193ebfb0} C:{Var:C Labels:origin=moonbeam, remote=arbitrum Value:0xc0193ebf10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866394214s EvaluationString:[ var='A' labels={origin=moonbeam, remote=arbitrum} value=0 ], [ var='B' labels={origin=moonbeam, remote=arbitrum} value=0 ], [ var='C' labels={origin=moonbeam, remote=arbitrum} value=0 ]} {Instance:origin=moonbeam, remote=avalanche State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=avalanche Value:0xc039766100} B:{Var:B Labels:origin=moonbeam, remote=avalanche Value:0xc039766080} C:{Var:C Labels:origin=moonbeam, remote=avalanche Value:0xc0397660d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866398551s EvaluationString:[ var='A' labels={origin=moonbeam, remote=avalanche} value=0 ], [ var='B' labels={origin=moonbeam, remote=avalanche} value=0 ], [ var='C' labels={origin=moonbeam, remote=avalanche} value=0 ]} {Instance:origin=moonbeam, remote=bsc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=bsc Value:0xc039766148} B:{Var:B Labels:origin=moonbeam, remote=bsc Value:0xc0397662d0} C:{Var:C Labels:origin=moonbeam, remote=bsc Value:0xc0397662f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866403101s EvaluationString:[ var='A' labels={origin=moonbeam, remote=bsc} value=0 ], [ var='B' labels={origin=moonbeam, remote=bsc} value=0 ], [ var='C' labels={origin=moonbeam, remote=bsc} value=0 ]} {Instance:origin=moonbeam, remote=celo State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:origin=moonbeam, remote=celo Value:0xc039766340} B:{Var:B Labels:origin=moonbeam, remote=celo Value:0xc039766568} C:{Var:C Labels:origin=moonbeam, remote=celo Value:0xc0397665a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866407424s EvaluationString:[ var='A' labels={origin=moonbeam, remote=celo} value=0 ], [ var='B' labels={origin=moonbeam, remote=celo} value=0 ], [ var='C' labels={origin=moonbeam, remote=celo} value=0 ]} {Instance:origin=moonbeam, remote=ethereum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=ethereum Value:0xc039766620} B:{Var:B Labels:origin=moonbeam, remote=ethereum Value:0xc0397666b8} C:{Var:C Labels:origin=moonbeam, remote=ethereum Value:0xc0397666f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866412423s EvaluationString:[ var='A' labels={origin=moonbeam, remote=ethereum} value=0 ], [ var='B' labels={origin=moonbeam, remote=ethereum} value=0 ], [ var='C' labels={origin=moonbeam, remote=ethereum} value=0 ]} {Instance:origin=moonbeam, remote=gnosis State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=gnosis Value:0xc039766990} B:{Var:B Labels:origin=moonbeam, remote=gnosis Value:0xc039766750} C:{Var:C Labels:origin=moonbeam, remote=gnosis Value:0xc039766778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866416633s EvaluationString:[ var='A' labels={origin=moonbeam, remote=gnosis} value=0 ], [ var='B' labels={origin=moonbeam, remote=gnosis} value=0 ], [ var='C' labels={origin=moonbeam, remote=gnosis} value=0 ]} {Instance:origin=moonbeam, remote=optimism State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=optimism Value:0xc0397669e0} B:{Var:B Labels:origin=moonbeam, remote=optimism Value:0xc039766a08} C:{Var:C Labels:origin=moonbeam, remote=optimism Value:0xc039766a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866421005s EvaluationString:[ var='A' labels={origin=moonbeam, remote=optimism} value=0 ], [ var='B' labels={origin=moonbeam, remote=optimism} value=0 ], [ var='C' labels={origin=moonbeam, remote=optimism} value=0 ]} {Instance:origin=moonbeam, remote=polygon State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=polygon Value:0xc039766aa0} B:{Var:B Labels:origin=moonbeam, remote=polygon Value:0xc039766ac8} C:{Var:C Labels:origin=moonbeam, remote=polygon Value:0xc039766ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866425193s EvaluationString:[ var='A' labels={origin=moonbeam, remote=polygon} value=0 ], [ var='B' labels={origin=moonbeam, remote=polygon} value=0 ], [ var='C' labels={origin=moonbeam, remote=polygon} value=0 ]} {Instance:origin=moonbeam, remote=solana State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=moonbeam, remote=solana Value:0xc039766fb0} B:{Var:B Labels:origin=moonbeam, remote=solana Value:0xc039766fd8} C:{Var:C Labels:origin=moonbeam, remote=solana Value:0xc039767010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866429079s EvaluationString:[ var='A' labels={origin=moonbeam, remote=solana} value=0 ], [ var='B' labels={origin=moonbeam, remote=solana} value=0 ], [ var='C' labels={origin=moonbeam, remote=solana} value=0 ]} {Instance:origin=optimism, remote=arbitrum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=arbitrum Value:0xc039767330} B:{Var:B Labels:origin=optimism, remote=arbitrum Value:0xc039767070} C:{Var:C 
Labels:origin=optimism, remote=arbitrum Value:0xc0397672c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866432896s EvaluationString:[ var='A' labels={origin=optimism, remote=arbitrum} value=0 ], [ var='B' labels={origin=optimism, remote=arbitrum} value=0 ], [ var='C' labels={origin=optimism, remote=arbitrum} value=0 ]} {Instance:origin=optimism, remote=avalanche State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=avalanche Value:0xc0397674e0} B:{Var:B Labels:origin=optimism, remote=avalanche Value:0xc039767570} C:{Var:C Labels:origin=optimism, remote=avalanche Value:0xc039767840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866437747s EvaluationString:[ var='A' labels={origin=optimism, remote=avalanche} value=0 ], [ var='B' labels={origin=optimism, remote=avalanche} value=0 ], [ var='C' labels={origin=optimism, remote=avalanche} value=0 ]} {Instance:origin=optimism, remote=bsc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=bsc Value:0xc0397678e0} B:{Var:B Labels:origin=optimism, remote=bsc Value:0xc039767908} C:{Var:C Labels:origin=optimism, remote=bsc Value:0xc039767898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866441812s EvaluationString:[ var='A' labels={origin=optimism, remote=bsc} value=0 ], [ var='B' labels={origin=optimism, remote=bsc} value=0 ], [ var='C' labels={origin=optimism, remote=bsc} value=0 ]} {Instance:origin=optimism, remote=celo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=celo Value:0xc039767b00} B:{Var:B Labels:origin=optimism, remote=celo Value:0xc039767b58} C:{Var:C Labels:origin=optimism, remote=celo Value:0xc039767b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866445638s EvaluationString:[ var='A' labels={origin=optimism, remote=celo} value=0 ], [ var='B' labels={origin=optimism, remote=celo} value=0 ], [ var='C' labels={origin=optimism, remote=celo} value=0 ]} {Instance:origin=optimism, remote=ethereum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=ethereum Value:0xc039767c18} B:{Var:B Labels:origin=optimism, remote=ethereum Value:0xc0085ca010} C:{Var:C Labels:origin=optimism, remote=ethereum Value:0xc039767be0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866450351s EvaluationString:[ var='A' labels={origin=optimism, remote=ethereum} value=0 ], [ var='B' labels={origin=optimism, remote=ethereum} value=0 ], [ var='C' labels={origin=optimism, remote=ethereum} value=0 ]} {Instance:origin=optimism, remote=gnosis State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=gnosis Value:0xc0085ca0d0} B:{Var:B Labels:origin=optimism, remote=gnosis Value:0xc0085ca070} C:{Var:C Labels:origin=optimism, remote=gnosis Value:0xc0085ca098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866454941s EvaluationString:[ var='A' labels={origin=optimism, remote=gnosis} value=0 ], [ var='B' labels={origin=optimism, remote=gnosis} value=0 ], [ var='C' labels={origin=optimism, remote=gnosis} value=0 ]} {Instance:origin=optimism, remote=moonbeam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=moonbeam Value:0xc0085ca120} B:{Var:B Labels:origin=optimism, remote=moonbeam Value:0xc0085ca148} C:{Var:C Labels:origin=optimism, remote=moonbeam Value:0xc0085ca1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866459461s EvaluationString:[ var='A' 
labels={origin=optimism, remote=moonbeam} value=0 ], [ var='B' labels={origin=optimism, remote=moonbeam} value=0 ], [ var='C' labels={origin=optimism, remote=moonbeam} value=0 ]} {Instance:origin=optimism, remote=polygon State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=polygon Value:0xc0085ca280} B:{Var:B Labels:origin=optimism, remote=polygon Value:0xc0085ca210} C:{Var:C Labels:origin=optimism, remote=polygon Value:0xc0085ca238}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866465781s EvaluationString:[ var='A' labels={origin=optimism, remote=polygon} value=0 ], [ var='B' labels={origin=optimism, remote=polygon} value=0 ], [ var='C' labels={origin=optimism, remote=polygon} value=0 ]} {Instance:origin=optimism, remote=solana State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=optimism, remote=solana Value:0xc0085ca318} B:{Var:B Labels:origin=optimism, remote=solana Value:0xc0085ca350} C:{Var:C Labels:origin=optimism, remote=solana Value:0xc0085ca2e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866470298s EvaluationString:[ var='A' labels={origin=optimism, remote=solana} value=0 ], [ var='B' labels={origin=optimism, remote=solana} value=0 ], [ var='C' labels={origin=optimism, remote=solana} value=0 ]} {Instance:origin=polygon, remote=arbitrum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=arbitrum Value:0xc0085ca400} B:{Var:B Labels:origin=polygon, remote=arbitrum Value:0xc0085ca3a0} C:{Var:C Labels:origin=polygon, remote=arbitrum Value:0xc0085ca3d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866475353s EvaluationString:[ var='A' labels={origin=polygon, remote=arbitrum} value=0 ], [ var='B' labels={origin=polygon, remote=arbitrum} value=0 ], [ var='C' labels={origin=polygon, remote=arbitrum} value=0 ]} {Instance:origin=polygon, remote=avalanche State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=avalanche Value:0xc0085ca4a0} B:{Var:B Labels:origin=polygon, remote=avalanche Value:0xc0085ca4d0} C:{Var:C Labels:origin=polygon, remote=avalanche Value:0xc0085ca460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866479868s EvaluationString:[ var='A' labels={origin=polygon, remote=avalanche} value=0 ], [ var='B' labels={origin=polygon, remote=avalanche} value=0 ], [ var='C' labels={origin=polygon, remote=avalanche} value=0 ]} {Instance:origin=polygon, remote=bsc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=bsc Value:0xc0085ca578} B:{Var:B Labels:origin=polygon, remote=bsc Value:0xc0085ca518} C:{Var:C Labels:origin=polygon, remote=bsc Value:0xc0085ca550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866485788s EvaluationString:[ var='A' labels={origin=polygon, remote=bsc} value=0 ], [ var='B' labels={origin=polygon, remote=bsc} value=0 ], [ var='C' labels={origin=polygon, remote=bsc} value=0 ]} {Instance:origin=polygon, remote=celo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=celo Value:0xc0085ca5e8} B:{Var:B Labels:origin=polygon, remote=celo Value:0xc0085ca610} C:{Var:C Labels:origin=polygon, remote=celo Value:0xc0085ca648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866490637s EvaluationString:[ var='A' labels={origin=polygon, remote=celo} value=0 ], [ var='B' labels={origin=polygon, remote=celo} value=0 ], [ var='C' labels={origin=polygon, remote=celo} value=0 ]} 
{Instance:origin=polygon, remote=ethereum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=ethereum Value:0xc0085ca6d8} B:{Var:B Labels:origin=polygon, remote=ethereum Value:0xc0085ca700} C:{Var:C Labels:origin=polygon, remote=ethereum Value:0xc0085ca748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866495206s EvaluationString:[ var='A' labels={origin=polygon, remote=ethereum} value=0 ], [ var='B' labels={origin=polygon, remote=ethereum} value=0 ], [ var='C' labels={origin=polygon, remote=ethereum} value=0 ]} {Instance:origin=polygon, remote=gnosis State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=gnosis Value:0xc0085ca7d8} B:{Var:B Labels:origin=polygon, remote=gnosis Value:0xc0085ca820} C:{Var:C Labels:origin=polygon, remote=gnosis Value:0xc0085ca848}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866500091s EvaluationString:[ var='A' labels={origin=polygon, remote=gnosis} value=0 ], [ var='B' labels={origin=polygon, remote=gnosis} value=0 ], [ var='C' labels={origin=polygon, remote=gnosis} value=0 ]} {Instance:origin=polygon, remote=moonbeam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=moonbeam Value:0xc0085ca908} B:{Var:B Labels:origin=polygon, remote=moonbeam Value:0xc0085ca8b8} C:{Var:C Labels:origin=polygon, remote=moonbeam Value:0xc0085ca8e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866504222s EvaluationString:[ var='A' labels={origin=polygon, remote=moonbeam} value=0 ], [ var='B' labels={origin=polygon, remote=moonbeam} value=0 ], [ var='C' labels={origin=polygon, remote=moonbeam} value=0 ]} {Instance:origin=polygon, remote=optimism State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=optimism Value:0xc0085ca968} B:{Var:B Labels:origin=polygon, remote=optimism Value:0xc0085ca9a0} C:{Var:C Labels:origin=polygon, remote=optimism Value:0xc0085ca9c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866508872s EvaluationString:[ var='A' labels={origin=polygon, remote=optimism} value=0 ], [ var='B' labels={origin=polygon, remote=optimism} value=0 ], [ var='C' labels={origin=polygon, remote=optimism} value=0 ]} {Instance:origin=polygon, remote=solana State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=polygon, remote=solana Value:0xc0085caa60} B:{Var:B Labels:origin=polygon, remote=solana Value:0xc0085caa88} C:{Var:C Labels:origin=polygon, remote=solana Value:0xc0085caa28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866513095s EvaluationString:[ var='A' labels={origin=polygon, remote=solana} value=0 ], [ var='B' labels={origin=polygon, remote=solana} value=0 ], [ var='C' labels={origin=polygon, remote=solana} value=0 ]} {Instance:origin=solana, remote=arbitrum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=arbitrum Value:0xc0085cab00} B:{Var:B Labels:origin=solana, remote=arbitrum Value:0xc0085cab28} C:{Var:C Labels:origin=solana, remote=arbitrum Value:0xc0085caad8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866518269s EvaluationString:[ var='A' labels={origin=solana, remote=arbitrum} value=0 ], [ var='B' labels={origin=solana, remote=arbitrum} value=0 ], [ var='C' labels={origin=solana, remote=arbitrum} value=0 ]} {Instance:origin=solana, remote=avalanche State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=avalanche Value:0xc0085cac00} B:{Var:B 
Labels:origin=solana, remote=avalanche Value:0xc0085cab98} C:{Var:C Labels:origin=solana, remote=avalanche Value:0xc0085cabc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866522178s EvaluationString:[ var='A' labels={origin=solana, remote=avalanche} value=0 ], [ var='B' labels={origin=solana, remote=avalanche} value=0 ], [ var='C' labels={origin=solana, remote=avalanche} value=0 ]} {Instance:origin=solana, remote=bsc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=bsc Value:0xc0085cacd0} B:{Var:B Labels:origin=solana, remote=bsc Value:0xc0085cacf0} C:{Var:C Labels:origin=solana, remote=bsc Value:0xc0085cac60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866527s EvaluationString:[ var='A' labels={origin=solana, remote=bsc} value=0 ], [ var='B' labels={origin=solana, remote=bsc} value=0 ], [ var='C' labels={origin=solana, remote=bsc} value=0 ]} {Instance:origin=solana, remote=celo State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=celo Value:0xc0085cad50} B:{Var:B Labels:origin=solana, remote=celo Value:0xc0085cad70} C:{Var:C Labels:origin=solana, remote=celo Value:0xc0085cad90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866532367s EvaluationString:[ var='A' labels={origin=solana, remote=celo} value=0 ], [ var='B' labels={origin=solana, remote=celo} value=0 ], [ var='C' labels={origin=solana, remote=celo} value=0 ]} {Instance:origin=solana, remote=ethereum State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=ethereum Value:0xc0085cae70} B:{Var:B Labels:origin=solana, remote=ethereum Value:0xc0085cade0} C:{Var:C Labels:origin=solana, remote=ethereum Value:0xc0085cae18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866536764s EvaluationString:[ var='A' labels={origin=solana, remote=ethereum} value=0 ], [ var='B' labels={origin=solana, remote=ethereum} value=0 ], [ var='C' labels={origin=solana, remote=ethereum} value=0 ]} {Instance:origin=solana, remote=gnosis State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=gnosis Value:0xc0085caf10} B:{Var:B Labels:origin=solana, remote=gnosis Value:0xc0085caf48} C:{Var:C Labels:origin=solana, remote=gnosis Value:0xc0085caf80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866550959s EvaluationString:[ var='A' labels={origin=solana, remote=gnosis} value=0 ], [ var='B' labels={origin=solana, remote=gnosis} value=0 ], [ var='C' labels={origin=solana, remote=gnosis} value=0 ]} {Instance:origin=solana, remote=moonbeam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=moonbeam Value:0xc0085cb030} B:{Var:B Labels:origin=solana, remote=moonbeam Value:0xc0085cb078} C:{Var:C Labels:origin=solana, remote=moonbeam Value:0xc0085cb0b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866557487s EvaluationString:[ var='A' labels={origin=solana, remote=moonbeam} value=0 ], [ var='B' labels={origin=solana, remote=moonbeam} value=0 ], [ var='C' labels={origin=solana, remote=moonbeam} value=0 ]} {Instance:origin=solana, remote=optimism State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=optimism Value:0xc0085cb140} B:{Var:B Labels:origin=solana, remote=optimism Value:0xc0085cb178} C:{Var:C Labels:origin=solana, remote=optimism Value:0xc0085cb1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866563002s EvaluationString:[ var='A' labels={origin=solana, 
remote=optimism} value=0 ], [ var='B' labels={origin=solana, remote=optimism} value=0 ], [ var='C' labels={origin=solana, remote=optimism} value=0 ]} {Instance:origin=solana, remote=polygon State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:origin=solana, remote=polygon Value:0xc0085cb238} B:{Var:B Labels:origin=solana, remote=polygon Value:0xc0085cb260} C:{Var:C Labels:origin=solana, remote=polygon Value:0xc0085cb210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866567382s EvaluationString:[ var='A' labels={origin=solana, remote=polygon} value=0 ], [ var='B' labels={origin=solana, remote=polygon} value=0 ], [ var='C' labels={origin=solana, remote=polygon} value=0 ]}]" duration=23.680836ms
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=PublishEvent, service=Core" t=2024-05-29T13:44:13.870457469Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n89c69gf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.870398899Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.870383367Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=465816 slug=metricgamingqa instance="cluster=qa_01, instance=ip-10-64-60-46.eu-west-2.compute.internal, job=integrations/node_exporter" t=2024-05-29T13:44:13.870375067Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=PublishEvent, service=Conversation" t=2024-05-29T13:44:13.870342739Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n89c69gf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.870324759Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.870243495Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n89c69gf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.870291978Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n87hpt1c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.870230918Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=IssueRequestDisposition, service=Issue" t=2024-05-29T13:44:13.870088284Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=IssueEnqueue, service=Issue" t=2024-05-29T13:44:13.870013233Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n87hpt1c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.870088036Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=IssueEnqueue, service=Issue" t=2024-05-29T13:44:13.870003914Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-289178laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289178, env_name=A148_Carrefour_QA, env_type=qa, instance=env-289178laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.870050763Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=465816 slug=metricgamingqa instance="cluster=qa_01, instance=ip-10-64-28-174.eu-west-2.compute.internal, job=integrations/node_exporter" t=2024-05-29T13:44:13.86992167Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.869863021Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7xunxty-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869767093Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-289152laio1westeurope, cloud_platform=Azure, customer_id=A148, env_id=289152, env_name=A148_Carrefour_Prod, env_type=prod, instance=env-289152laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.869870027Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7xunxty-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869743193Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=Connect, service=Core" t=2024-05-29T13:44:13.869885151Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7xunxty-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869713642Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=AssignToAgent, service=Conversation" t=2024-05-29T13:44:13.869810252Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.86951764Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=465816 slug=metricgamingqa instance="cluster=qa_01, instance=ip-10-64-148-192.eu-west-2.compute.internal, job=integrations/node_exporter" t=2024-05-29T13:44:13.869653065Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7wmdhgb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869537081Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.869522587Z caller=remote_instance_store.go:51 user=340750 slug=aptoslabs msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7wmdhgb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86946851Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=340750 slug=aptoslabs instance="datasource_uid=vU-Lwva4k, ref_id=A" t=2024-05-29T13:44:13.869459112Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=340750 slug=aptoslabs instance="datasource_uid=vU-Lwva4k, ref_id=A" t=2024-05-29T13:44:13.869449603Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=340750 slug=aptoslabs version=18 fingerprint=33752fd00b420d48 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.869349029Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=vU-Lwva4k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.869006962s EvaluationString:}]" duration=22.729754ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7v22673-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869286108Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=186562 slug=defier instance= t=2024-05-29T13:44:13.867289495Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-288344laio1westus2, cloud_platform=Azure, customer_id=A116, env_id=288344, env_name=A116_Costco_Prod_2021, env_type=prod, instance=env-288344laio1westus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:13.869307146Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=186562 slug=defier instance= t=2024-05-29T13:44:13.867276843Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7gdm6ij-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869141457Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, endpoint=Publish, service=Adapter" t=2024-05-29T13:44:13.869109891Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7gdm6ij-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869105986Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7gdm6ij-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.869028945Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, endpoint=Publish, service=Adapter" t=2024-05-29T13:44:13.869101673Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7f0sppe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.868968925Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-287012laio1westus2, cloud_platform=Azure, customer_id=A116, env_id=287012, env_name=a116_Costco_Qa_2021, env_type=qa, instance=env-287012laio1westus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:13.869022681Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, endpoint=NotifyPong, service=UserPresenceNotifier" t=2024-05-29T13:44:13.868993685Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.868860935Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.868924948Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.868889774Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.868879597Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=788179 slug=krea instance="__name__=kube_deployment_status_replicas_ready, container=kube-state-metrics, deployment=lcm-service, endpoint=http, instance=10.224.10.112:8080, job=kube-state-metrics, namespace=default, pod=kube-prometheus-stack-1704318916-kube-state-metrics-55d7b96qbf9, service=kube-prometheus-stack-1704318916-kube-state-metrics" t=2024-05-29T13:44:13.86881275Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-286990laio1westeurope, cloud_platform=Azure, customer_id=A169, env_id=286990, env_name=A169_Aigues_Prod, env_type=prod, instance=env-286990laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.86890158Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.868822783Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7cyt92a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.868772623Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, endpoint=PublishEvent, service=Core" t=2024-05-29T13:44:13.868785025Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=788179 slug=krea version=10 fingerprint=5c7f0d92ab4ed66b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.868634955Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=kube_deployment_status_replicas_ready, container=kube-state-metrics, deployment=lcm-service, endpoint=http, instance=10.224.10.112:8080, job=kube-state-metrics, namespace=default, pod=kube-prometheus-stack-1704318916-kube-state-metrics-55d7b96qbf9, service=kube-prometheus-stack-1704318916-kube-state-metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0397c5fa8} B:{Var:B Labels:__name__=kube_deployment_status_replicas_ready, container=kube-state-metrics, deployment=lcm-service, endpoint=http, instance=10.224.10.112:8080, job=kube-state-metrics, namespace=default, pod=kube-prometheus-stack-1704318916-kube-state-metrics-55d7b96qbf9, service=kube-prometheus-stack-1704318916-kube-state-metrics Value:0xc0361de030} C:{Var:C Labels:__name__=kube_deployment_status_replicas_ready, container=kube-state-metrics, deployment=lcm-service, endpoint=http, instance=10.224.10.112:8080, job=kube-state-metrics, namespace=default, pod=kube-prometheus-stack-1704318916-kube-state-metrics-55d7b96qbf9, service=kube-prometheus-stack-1704318916-kube-state-metrics Value:0xc0397c5fa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.867974921s EvaluationString:[ var='A' labels={} value=167.24257091725752 ], [ var='B' labels={__name__=kube_deployment_status_replicas_ready, container=kube-state-metrics, deployment=lcm-service, endpoint=http, instance=10.224.10.112:8080, job=kube-state-metrics, namespace=default, pod=kube-prometheus-stack-1704318916-kube-state-metrics-55d7b96qbf9, service=kube-prometheus-stack-1704318916-kube-state-metrics} value=0 ], [ var='C' labels={__name__=kube_deployment_status_replicas_ready, container=kube-state-metrics, deployment=lcm-service, endpoint=http, instance=10.224.10.112:8080, job=kube-state-metrics, namespace=default, pod=kube-prometheus-stack-1704318916-kube-state-metrics-55d7b96qbf9, service=kube-prometheus-stack-1704318916-kube-state-metrics} value=70 ]}]" duration=53.810105ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n7cyt92a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.868731842Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:13.868552044Z level=debug msg="State manager processing evaluation results" resultCount=58
+ logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.868572596Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.868521611Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=37.176187ms
+ logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:13.868552439Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.868376736Z caller=remote_image_capturer.go:33 user=337951 slug=pawapay rule_org_id=1 rule_uid=a2c822b1 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n76jaxjk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.868406379Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=337951 slug=pawapay instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.868345961Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.868336786Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=337951 slug=pawapay version=8 fingerprint=daea616c9d24cdb7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.868185203Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.86776141s EvaluationString:}]" duration=10.66373ms
+ logger=ngalert.state.manager user=432323 slug=lithic instance="query=lookup_pan" t=2024-05-29T13:44:13.868101749Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n72s7vpl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.868082676Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n72s7vpl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.868031055Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n72s7vpl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.868016455Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6wdqcfj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867979625Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.867921324Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics,
namespace=kube-prometheus-stack, persistentvolume=ws-n6wdqcfj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867845923Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6si8q9m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867807413Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6si8q9m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867655751Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic instance="query=load_pin_encryption_keys" t=2024-05-29T13:44:13.867964487Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6sbnmhs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86756046Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=198391 slug=zozio t=2024-05-29T13:44:13.8678507Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6sbnmhs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86747858Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6oh9wwm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867428719Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6oh9wwm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867367828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6oh9wwm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867356138Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="query=load_instance" t=2024-05-29T13:44:13.867800534Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-283735laio1eastus, cloud_platform=Azure, customer_id=A128, env_id=283735, env_name=A128_qvc_Prod, env_type=prod, instance=env-283735laio1eastus, job=integrations/node_exporter, region=eastus, stage=live" t=2024-05-29T13:44:13.867780482Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6jkzjdq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867214327Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6jkzjdq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867164976Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6iyci63-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.867001295Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6ivcwr8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866963874Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.867564337Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.867653382Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6ivcwr8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866906174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic instance="query=load_feature_flags" t=2024-05-29T13:44:13.867677174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=432323 slug=lithic instance="query=load_cards" t=2024-05-29T13:44:13.867608286Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=800848 slug=flowfoundation t=2024-05-29T13:44:13.867570932Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=800848 slug=flowfoundation instance= t=2024-05-29T13:44:13.86755619Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=800848 slug=flowfoundation version=1 fingerprint=a9f02667ff016e65 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.867398894Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866872969s EvaluationString:}]" duration=31.069634ms + logger=ngalert.state.manager user=432323 slug=lithic instance="query=load_account_config" t=2024-05-29T13:44:13.867447821Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-283469laio1apn1, cloud_platform=AWS, customer_id=C630, env_id=283469, env_name=C630_Tokyo Century_Dev, env_type=dev, instance=env-283469laio1apn1, job=integrations/node_exporter, region=ap-northeast-1, stage=decommission" t=2024-05-29T13:44:13.867407409Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="query=load_account" t=2024-05-29T13:44:13.867367152Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=186562 slug=defier version=2 fingerprint=2654cdfd9f9e92a6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.867120503Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866733849s EvaluationString:}]" duration=23.111253ms + logger=ngalert.state.manager user=516847 slug=signit instance= t=2024-05-29T13:44:13.867178648Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="query=get_hmac_keys" t=2024-05-29T13:44:13.867245364Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.86716421Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=516847 slug=signit version=22 fingerprint=8299134344e58b58 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.867088055Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc072fbd8a0} B:{Var:B Labels: Value:0xc072fbd8a8} C:{Var:C Labels: 
Value:0xc072fbd858}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866729003s EvaluationString:[ var='A' labels={} value=6.388888888888762 ], [ var='B' labels={} value=6.388888888888762 ], [ var='C' labels={} value=0 ]}]" duration=13.36448ms + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-283466laio1use1, cloud_platform=AWS, customer_id=C591, env_id=283466, env_name=C591_Novartis_QA, env_type=qa, instance=env-283466laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.867188531Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-283466laio1use1, cloud_platform=AWS, customer_id=C591, env_id=283466, env_name=C591_Novartis_QA, env_type=qa, instance=env-283466laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.867171238Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="query=get_card_auth_rules" t=2024-05-29T13:44:13.867171769Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=307381 slug=kambitaskforce version=46 fingerprint=5b7c1ed8011c0e96 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.866940462Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.866532572s EvaluationString:}]" duration=22.168539ms + logger=ngalert.state.manager user=432323 slug=lithic instance="query=account_spend_velocity" t=2024-05-29T13:44:13.86699052Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.866977153Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6ivcwr8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866855173Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6i86jn4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866701522Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.866739854Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-283245laio1centralus, cloud_platform=Azure, customer_id=A151, env_id=283245, env_name=A151 Digi-Key Dev/Test, env_type=dev, instance=env-283245laio1centralus, job=integrations/node_exporter, region=centralus, stage=live" t=2024-05-29T13:44:13.866734294Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6gvhb4j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86650436Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.866472121Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6gvhb4j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866417859Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.866386472Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.866291912Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6bybcm6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866351718Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6bybcm6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866256367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6bybcm6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866228617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n6abxdqv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.866165906Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.866117383Z 
caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.866100363Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.866096972Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.866099039Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n65ysgyp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865983154Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-278412laio1westeurope, cloud_platform=Azure, customer_id=A113, env_id=278412, env_name=A113 - BAT - Sandbox, env_type=sandbox, instance=env-278412laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.865947009Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.865859401Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n65ysgyp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865895503Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n65ysgyp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865858763Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.865826429Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.865827598Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.865773114Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.865814325Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.865763466Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.865713151Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=245291 
slug=pismo version=29 fingerprint=9e829fb86ff40b81 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.865654262Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.865352195s EvaluationString:}]" duration=157.307913ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n646x9k4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865674131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n646x9k4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865647011Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n62uym7a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86560727Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.865528858Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.865423752Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n62uym7a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865520769Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.865333429Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-275846laio1use1, cloud_platform=AWS, customer_id=C595, env_id=275846, env_name=C595_Franconnect_Prod, env_type=prod, instance=env-275846laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.865474811Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.865339353Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.865303675Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 
slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-275843laio1use1, cloud_platform=AWS, customer_id=C595, env_id=275843, env_name=C595_Franconnect_DEV, env_type=dev, instance=env-275843laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.86515431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-275843laio1use1, cloud_platform=AWS, customer_id=C595, env_id=275843, env_name=C595_Franconnect_DEV, env_type=dev, instance=env-275843laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.865128446Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-275315laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=275315, env_name=A108_LCBO_QA_M2021, env_type=qa, instance=env-275315laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live" t=2024-05-29T13:44:13.864944283Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:13.86542174Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.86527671Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5w3bsgo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865370828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live" t=2024-05-29T13:44:13.864259666Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5w3bsgo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865318677Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5t4kbyv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865191596Z level=debug msg="Setting next state" handler=resultNormal + level=debug 
ts=2024-05-29T13:44:13.865126903Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.865123726Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5hxwic8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.865067685Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696538 slug=drakkarsoftware instance="__rollup__=true, operation=TaskQueueMgr, task_type=Workflow, temporal_namespace=development.hyjat, temporal_service_type=matching, worker_build_id=_tag_excluded_" t=2024-05-29T13:44:13.865081551Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5gu8dtg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864960464Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.86497629Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5gu8dtg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864938164Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696538 slug=drakkarsoftware instance="__rollup__=true, operation=TaskQueueMgr, task_type=Activity, temporal_namespace=development.hyjat, temporal_service_type=matching, worker_build_id=_tag_excluded_" t=2024-05-29T13:44:13.864983429Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5gu8dtg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864908843Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5gu8dtg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864814032Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=696538 slug=drakkarsoftware t=2024-05-29T13:44:13.864810755Z level=debug msg="State manager processing evaluation results" resultCount=6 + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=SRviRMnVz, ref_id=A" t=2024-05-29T13:44:13.864655835Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5cdwwj5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86463231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5cdwwj5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86455669Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.864534391Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n5cdwwj5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864504689Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.864438693Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.864382034Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.864352968Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n59clpt9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864371228Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n59clpt9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864346687Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112732 
slug=gleamer t=2024-05-29T13:44:13.864169894Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.864214485Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n50ccgch-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.864101145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.864119081Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.864029411Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.86409887Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4yzsi7u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863917043Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.863966283Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.863730486Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live" t=2024-05-29T13:44:13.863682003Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4ylq48q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86359069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4ylq48q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86357699Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4ylq48q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863512679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4ylq48q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863472378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4ylq48q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863448778Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4t5zzqt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863391788Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4t5zzqt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863354647Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4t5zzqt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863320517Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4s1z05r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863222456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.863116345Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live" t=2024-05-29T13:44:13.863129248Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.863094028Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4s1z05r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.863064284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4r7qzdc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.862942953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4q2n05e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.862830012Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
+level=debug ts=2024-05-29T13:44:13.862849622Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:13.862834649Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=150145 slug=pleasant instance= t=2024-05-29T13:44:13.862819182Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=150145 slug=pleasant instance= t=2024-05-29T13:44:13.862803163Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.scheduler user=150145 slug=pleasant version=2 fingerprint=6608c1fda17af4cf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.862752341Z level=error msg="Failed to evaluate rule" error="failed to parse expression 'B': failed to unmarshal remarshaled classic condition body: json: cannot unmarshal string into Go struct field ConditionEvalJSON.evaluator.params of type float64" duration=999.655µs
+level=error ts=2024-05-29T13:44:13.862723266Z caller=remote_rule_evaluator.go:110 user=150145 slug=pleasant msg="remote evaluate failed" code=Code(422) err="failed to parse expression 'B': failed to unmarshal remarshaled classic condition body: json: cannot unmarshal string into Go struct field ConditionEvalJSON.evaluator.params of type float64"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-n4mfrn9v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.86262933Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo instance="QueueName=ImportStatements-dead-letter" t=2024-05-29T13:44:13.861976729Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.858889687Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=info ts=2024-05-29T13:44:13.85554428Z caller=remote_alert_sender.go:94 user=230713 slug=flocksafety host=flocksafety-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.22.91:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a5884170-ce5f-493c-8639-9d702d4b2495 alerts=1
[... remainder of this added test-fixture hunk elided: roughly one hundred further ngalert debug entries of the same shapes, all timestamped 2024-05-29T13:44:13Z: "Setting next state" / "Keeping state" lines for kube_persistentvolume_status_phase series (user=838012 slug=lepton) and mstr_status_message_kafka series (user=412779 slug=microstrategy); "Saving alert states" / "Saving alert states done" persistence lines; remote_instance_store.go:51 "calling SaveAlertInstance" calls for various tenants; "Execution no data state is Normal" lines (user=777670 slug=fakku); and "Alert rule evaluated" scheduler results for slugs mcv and railsbank ...]
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzsl1mdj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853635927Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzsl1mdj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853531736Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzrqwjrd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853498396Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzrqwjrd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853468765Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.853467728Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzrqwjrd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853390295Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzqmec8b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853262583Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.853214853Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzqmec8b-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853157732Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzqmec8b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853100922Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzo6fnz2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.853009771Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.852968799Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:13.852947978Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.354336ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzo6fnz2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.85297763Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.852990476Z caller=remote_alert_sender.go:94 user=112732 slug=gleamer host=gleamer-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.180.90:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdkzwpsq38w74b alerts=1 + level=debug ts=2024-05-29T13:44:13.852928553Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzlkk35z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852875079Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=83647 slug=bidsolutions t=2024-05-29T13:44:13.852904079Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=65.599411ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzkxi9fc-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852751098Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.852825662Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.852761029Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzdjm87d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852552266Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.852473483Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:13.852569447Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.852491073Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.852465186Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.852431309Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzcd3m5u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852368254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzcd3m5u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852343834Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzcd3m5u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852306364Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.852261396Z caller=remote_instance_store.go:51 user=127813 slug=clearsale msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.852265255Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mzcd3m5u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852234363Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:13.852058918Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz9ak1r2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852167582Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz9ak1r2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852100071Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz9ak1r2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.852069411Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:13.852067354Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:13.852048472Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz9ak1r2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.85199732Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz6wjtxc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851745138Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz6wjtxc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851658597Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz6wjtxc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851546866Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz6l80e0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851494325Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz6l80e0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851463465Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live" t=2024-05-29T13:44:13.851339821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz6l80e0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851384604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz39e1v6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851292013Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.851215663Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz39e1v6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.851136392Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live" t=2024-05-29T13:44:13.851151394Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz1nheps-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.85103433Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz1nheps-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.8510073Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz1nheps-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.85097231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.850807026Z 
level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.586784ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz0ivp98-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850818788Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.850694338Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:13.850628408Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.849768983Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.85051776Z caller=remote_instance_store.go:51 user=451750 slug=amadeuspfpprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=648171 slug=citrixdev t=2024-05-29T13:44:13.850554129Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz0epftf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850548946Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=648171 slug=citrixdev instance= t=2024-05-29T13:44:13.850539135Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mz0epftf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850490735Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.850348353Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=412779 slug=microstrategy instance="__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live" t=2024-05-29T13:44:13.85040274Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-myz7e59w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850296123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myz7e59w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850267883Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=412779 slug=microstrategy t=2024-05-29T13:44:13.850045789Z level=debug msg="State manager processing evaluation results" resultCount=721 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myrbv5kh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850233952Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=412779 slug=microstrategy version=115 fingerprint=86a6e020f99a7ff3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.813453568Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01bc7c908} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01bc7cfe0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01bc7d0d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.72957176s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, 
job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238223laio1eastus2, cloud_platform=Azure, customer_id=A126, env_id=238223, env_name=A126_PVH_PROD_M2021, env_type=prod, instance=env-238223laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01bc7d258} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01bc7d390} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01bc7d488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729611838s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238230laioeastus2, cloud_platform=Azure, customer_id=A126, env_id=238230, env_name=A126_PVH_DEV_M2021, env_type=dev, instance=env-238230laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live Value:0xc01bc7d7f0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live Value:0xc01bc7d620} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238296laiousw1, 
cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live Value:0xc01bc7d718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729632511s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238296laiousw1, cloud_platform=AWS, customer_id=C537, env_id=238296, env_name=C537_PacSun_DEV_2021, env_type=dev, instance=env-238296laiousw1, job=integrations/node_exporter, region=us-west-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01bc7d988} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01bc7da50} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc01bc7db18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729652727s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-238672laio1use1, cloud_platform=AWS, customer_id=C538, env_id=238672, env_name=C538_HBC_Prod_M2021, env_type=prod, instance=env-238672laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, 
instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01bc7de70} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01bc7dc98} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc01bc7ddb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729672411s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-240076laionortheurope, cloud_platform=Azure, customer_id=A102, env_id=240076, env_name=A102_Limagrain_Dev_M2021, env_type=dev, instance=env-240076laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386c190} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386c020} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386c0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729689589s EvaluationString:[ var='A' 
labels={__name__=mstr_status_message_kafka, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-240335laio1northeurope, cloud_platform=Azure, customer_id=A102, env_id=240335, env_name=A102_Limagrain_Prod_M2021, env_type=undefined, instance=env-240335laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386c5c0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386cb30} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386cbc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729701465s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-241206laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241206, env_name=A121_CTTI_Prod_M2021, env_type=prod, instance=env-241206laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: 
Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386d020} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386d0c0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc02386cf88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729715096s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-241214laionortheurope, cloud_platform=Azure, customer_id=A121, env_id=241214, env_name=A121_CTTI_Integ_M2021, env_type=Integ, instance=env-241214laionortheurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02386d2c0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02386d350} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, env_type=dev, instance=env-241432laioeastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc02386d228}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.729733728s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-241432laioeastus2, cloud_platform=Azure, customer_id=A124, env_id=241432, env_name=A124_PartnersHealthcare_Dev_M2021, 
[Elided: a Grafana alerting state dump repeated verbatim for roughly two dozen customer environments (Azure and AWS; regions including eastus, eastus2, westus, westus2, southcentralus, westeurope, GermanyWestCentral, us-east-1, and us-west-1). Every record follows the same pattern, varying only in the host, customer, environment, and region labels:

{Instance:__name__=mstr_status_message_kafka, agent_hostname=<host>, cloud_platform=<Azure|AWS>, customer_id=<id>, env_id=<id>, env_name=<name>, env_type=<dev|test|qa|prod>, instance=<host>, job=integrations/node_exporter, region=<region>, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:<same label set> Value:<ptr>} B:{Var:B Labels:<same label set> Value:<ptr>} C:{Var:C Labels:<same label set> Value:<ptr>}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.72...s EvaluationString:[ var='A' labels={<same label set>} value=1 ], [ var='B' labels={<same label set>} value=1 ], [ var='C' labels={<same label set>} value=0 ]}

Every instance in the dump evaluated to State:Normal with A=1, B=1, and C=0 at the same timestamp, and the dump continues in this pattern; see the Go sketch below for the apparent record shape.]
env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc0243aa018} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc0266b97b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730296937s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-263640laiowesteurope, cloud_platform=Azure, customer_id=A135, env_id=263640, env_name=A135_Roland_Dev, env_type=dev, instance=env-263640laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc0243aa678} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc0243aa9f0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc0243ab1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730313193s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-264639laio1use1, cloud_platform=AWS, customer_id=C324, env_id=264639, env_name=C324_Marsh_Prod_2021, env_type=prod, 
instance=env-264639laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc0243aba60} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc0243abda8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc0243abfe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730332863s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-265723laiogermanywestcentral, cloud_platform=Azure, customer_id=A133, env_id=265723, env_name=A133 Douglas DEV, env_type=dev, instance=env-265723laiogermanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc06f52c170} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc06f52c238} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, 
env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc06f52c2e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730349444s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-265908laiowestus2, cloud_platform=Azure, customer_id=A144, env_id=265908, env_name=A144 Big 5 Corp DEV, env_type=dev, instance=env-265908laiowestus2, job=integrations/node_exporter, region=westus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc06f52c4b0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc06f52c650} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc06f52c980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730370288s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-266276laioaustraliaeast, cloud_platform=Azure, customer_id=A145, env_id=266276, env_name=A145_Bunnings_Prod, env_type=prod, instance=env-266276laioaustraliaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-269670laioeastus, 
cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc06f52ce80} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc06f52cf70} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live Value:0xc06f52cc90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730388625s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-269670laioeastus, cloud_platform=Azure, customer_id=A128, env_id=269670, env_name=a128_qvc_DEV, env_type=dev, instance=env-269670laioeastus, job=integrations/node_exporter, region=eastus, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc06f52d220} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc06f52d3d8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc06f52d118}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730405566s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, 
env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272009laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272009, env_name=A154_Monoprix_Dev, env_type=dev, instance=env-272009laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc06f52db10} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc06f52d7c8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc06f52da28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730424361s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272010laiouse1, cloud_platform=AWS, customer_id=C583, env_id=272010, env_name=C583 - 5 Hour old Prod, env_type=prod, instance=env-272010laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, 
stage=live Value:0xc06f52dd68} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc06f52de30} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc06f52dcc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730441555s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-272146laio1westeurope, cloud_platform=Azure, customer_id=A154, env_id=272146, env_name=A154_Monoprix_Prod, env_type=prod, instance=env-272146laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc06f52dfe8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568128} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730458624s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' 
labels={__name__=mstr_status_message_kafka, agent_hostname=env-273677laio1eastus2, cloud_platform=Azure, customer_id=A157, env_id=273677, env_name=A157 Ryder Dev, env_type=dev, instance=env-273677laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568590} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568418} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc0175684d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730477422s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273680laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273680, env_name=A158 - Bain - Prod Ent, env_type=prod, instance=env-273680laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc0175688c0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568730} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273681laio1eastus2, 
cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc0175687d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730511867s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273681laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273681, env_name=A158 - Bain - DEV Ent, env_type=dev, instance=env-273681laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017568c60} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017568ad8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017568ba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730530309s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273900laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273900, env_name=a131_symrise_qa_2021u4, env_type=qa, instance=env-273900laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, 
agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568de8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568eb8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc017568f88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730547586s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273910laio1eastus2, cloud_platform=Azure, customer_id=A158, env_id=273910, env_name=A158 - Bain - Test Ent, env_type=test, instance=env-273910laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc0175692c0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc0175690f0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live Value:0xc0175691e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730562506s EvaluationString:[ var='A' 
labels={__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273913laio1westus2, cloud_platform=Azure, customer_id=A144, env_id=273913, env_name=A144 Big 5 Corp PROD, env_type=prod, instance=env-273913laio1westus2, job=integrations/node_exporter, region=westus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569530} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc0175695e8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730574156s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-273931laio1westeurope, cloud_platform=Azure, customer_id=A131, env_id=273931, env_name=a131_symrise_prod_2021u4, env_type=prod, instance=env-273931laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569798} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569890} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569a30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730587789s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274545laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274545, env_name=A101_Jumbo_2021_ACCEPT, env_type=accept, instance=env-274545laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569bd0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569d10} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730600017s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, 
job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274593laiowesteurope, cloud_platform=Azure, customer_id=A101, env_id=274593, env_name=A101_Jumbo_2021_DEV, env_type=dev, instance=env-274593laiowesteurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc017569f68} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc03b2d4198} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc03b2d4518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730612809s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274689laio1westeurope, cloud_platform=Azure, customer_id=A101, env_id=274689, env_name=A101_Jumbo_2021_Prod, env_type=prod, instance=env-274689laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, 
job=integrations/node_exporter, region=CanadaCentral, stage=live Value:0xc03b2d4738} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live Value:0xc03b2d4880} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live Value:0xc03b2d4c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730629714s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-274836laio1canadacentral, cloud_platform=Azure, customer_id=A108, env_id=274836, env_name=A108_LCBO_Prod_M2021, env_type=prod, instance=env-274836laio1canadacentral, job=integrations/node_exporter, region=CanadaCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-275128laio1use1, cloud_platform=AWS, customer_id=C591, env_id=275128, env_name=C591_Novartis_Dev, env_type=dev, instance=env-275128laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-275128laio1use1, cloud_platform=AWS, customer_id=C591, env_id=275128, env_name=C591_Novartis_Dev, env_type=dev, instance=env-275128laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc03b2d5db0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-275128laio1use1, cloud_platform=AWS, customer_id=C591, env_id=275128, env_name=C591_Novartis_Dev, env_type=dev, instance=env-275128laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc03b2d5448} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-275128laio1use1, cloud_platform=AWS, customer_id=C591, env_id=275128, env_name=C591_Novartis_Dev, env_type=dev, instance=env-275128laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc03b2d5528}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.730647454s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-275128laio1use1, cloud_platform=AWS, customer_id=C591, env_id=275128, env_name=C591_Novartis_Dev, env_type=dev, instance=env-275128laio1use1, job=integrations/node_exporter, region=us-east-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-275128laio1use1, cloud_platform=AWS, customer_id=C591, env_id=275128, env_name=C591_Novartis_Dev, env_type=dev, 
*(Condensed from a Grafana Alerting state-history dump.)* Each record below is one evaluation instance of the `mstr_status_message_kafka` metric (`job=integrations/node_exporter`). Every instance was evaluated at 2024-05-29 13:44:10 UTC with an evaluation duration of roughly 3.73 s, and every instance returned `State: Normal` with expression values `A=1`, `B=1`, `C=0`. The per-instance label sets differ only in the fields tabulated here (`agent_hostname` matches `instance` throughout):

| env_id | env_name                   | customer_id | cloud_platform | region             | env_type | stage        |
|--------|----------------------------|-------------|----------------|--------------------|----------|--------------|
| 275128 | C591_Novartis_Dev          | C591        | AWS            | us-east-1          | dev      | live         |
| 275276 | A108_LCBO_Dev_M2021        | A108        | Azure          | CanadaCentral      | dev      | live         |
| 275315 | A108_LCBO_QA_M2021         | A108        | Azure          | CanadaCentral      | qa       | live         |
| 275843 | C595_Franconnect_DEV       | C595        | AWS            | us-east-1          | dev      | live         |
| 275846 | C595_Franconnect_Prod      | C595        | AWS            | us-east-1          | prod     | live         |
| 275957 | A159_Mubadala_Prod         | A159        | Azure          | UAENorth           | prod     | live         |
| 278412 | A113 - BAT - Sandbox       | A113        | Azure          | westeurope         | sandbox  | live         |
| 279146 | A160 Marc Jacobs PROD      | A160        | Azure          | eastus             | prod     | live         |
| 279208 | A161_Thyssenkrup_Prod      | A161        | Azure          | westeurope         | prod     | live         |
| 281950 | A145_Bunnings_Dev          | A145        | Azure          | australiaeast      | dev      | live         |
| 283245 | A151 Digi-Key Dev/Test     | A151        | Azure          | centralus          | dev      | live         |
| 283453 | C591_Novartis_Prod         | C591        | AWS            | us-east-1          | prod     | live         |
| 283466 | C591_Novartis_QA           | C591        | AWS            | us-east-1          | qa       | live         |
| 283469 | C630_Tokyo Century_Dev     | C630        | AWS            | ap-northeast-1     | dev      | decommission |
| 283470 | C630_TokyoCentury_Prod     | C630        | AWS            | ap-northeast-1     | prod     | decommission |
| 283735 | A128_qvc_Prod              | A128        | Azure          | eastus             | prod     | live         |
| 283878 | A164_CTTI_Corp_PROD        | A164        | Azure          | northeurope        | prod     | live         |
| 284427 | c595_franconnect_uat_2021  | C595        | AWS            | us-east-1          | qa       | live         |
| 284881 | A151_Digikey_Prod_mstrbak  | A151        | Azure          | centralus          | prod     | live         |
| 286302 | A167 Loewe Prod            | A167        | Azure          | westeurope         | prod     | live         |
| 286767 | A116_Costco_Dev_2021       | A116        | Azure          | westus2            | dev      | live         |
| 286990 | A169_Aigues_Prod           | A169        | Azure          | westeurope         | prod     | live         |
| 287012 | a116_Costco_Qa_2021        | A116        | Azure          | westus2            | qa       | live         |
| 287969 | a170_ASI_Dev               | A170        | Azure          | eastus             | prod     | live         |
| 288344 | A116_Costco_Prod_2021      | A116        | Azure          | westus2            | prod     | live         |
| 288831 | A162_Sonae_Prod            | A162        | Azure          | northeurope        | prod     | live         |
| 288988 | A172_SCA_DEV               | A172        | Azure          | westeurope         | dev      | live         |
| 289152 | A148_Carrefour_Prod        | A148        | Azure          | westeurope         | prod     | live         |
| 289178 | A148_Carrefour_QA          | A148        | Azure          | westeurope         | qa       | live         |
| 289493 | a148_Carrefour_Dev_2021    | A148        | Azure          | westeurope         | dev      | live         |
| 291882 | A170_ASI_Prod              | A170        | Azure          | eastus             | dev      | live         |
| 291902 | A165 DWH-MA DEV            | A165        | Azure          | GermanyWestCentral | dev      | live         |
| 291924 | A169 Aigues Dev            | A169        | Azure          | westeurope         | dev      | live         |
| 292236 | A165 DWH-MA PROD           | A165        | Azure          | GermanyWestCentral | prod     | live         |
agent_hostname=env-292236laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=292236, env_name=A165 DWH-MA PROD, env_type=prod, instance=env-292236laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-292778laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=prod, instance=env-292778laio1northeurope, job=integrations/node_exporter, region=northeurope State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-292778laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=prod, instance=env-292778laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc00ba1b558} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-292778laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=prod, instance=env-292778laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc00ba1b5d8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-292778laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=prod, instance=env-292778laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc00ba1b668}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731286792s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-292778laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=prod, instance=env-292778laio1northeurope, job=integrations/node_exporter, region=northeurope} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-292778laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=prod, instance=env-292778laio1northeurope, job=integrations/node_exporter, region=northeurope} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-292778laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=prod, instance=env-292778laio1northeurope, job=integrations/node_exporter, region=northeurope} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00ba1b890} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00ba1b930} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live Value:0xc00ba1b7e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731299644s 
EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-292945laio1westeurope, cloud_platform=Azure, customer_id=A175, env_id=292945, env_name=A175 - IberianSports Prod, env_type=prod, instance=env-292945laio1westeurope, job=integrations/node_exporter, region=westeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc00ba1ba90} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc00ba1bb38} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live Value:0xc00ba1bbf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731312104s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293183laio1northeurope, cloud_platform=Azure, customer_id=A164, env_id=293183, env_name=A164 CTTI Corp DEV, env_type=dev, instance=env-293183laio1northeurope, job=integrations/node_exporter, region=northeurope, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-293328laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=293328, env_name=A176 TWG Prod, env_type=prod, instance=env-293328laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live State:Normal Error: 
Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293328laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=293328, env_name=A176 TWG Prod, env_type=prod, instance=env-293328laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc00ba1bd88} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293328laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=293328, env_name=A176 TWG Prod, env_type=prod, instance=env-293328laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc00ba1be50} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293328laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=293328, env_name=A176 TWG Prod, env_type=prod, instance=env-293328laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live Value:0xc00ba1bf10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731322923s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293328laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=293328, env_name=A176 TWG Prod, env_type=prod, instance=env-293328laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293328laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=293328, env_name=A176 TWG Prod, env_type=prod, instance=env-293328laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293328laio1australiaeast, cloud_platform=Azure, customer_id=A176, env_id=293328, env_name=A176 TWG Prod, env_type=prod, instance=env-293328laio1australiaeast, job=integrations/node_exporter, region=australiaeast, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live Value:0xc01477c230} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live Value:0xc01477c308} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live Value:0xc01477c068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731334473s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, 
region=eu-west-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293839laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293839, env_name=c655_booking_prod_2021, env_type=prod, instance=env-293839laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-293840laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293840, env_name=c655_booking_dev_2021, env_type=dev, instance=env-293840laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293840laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293840, env_name=c655_booking_dev_2021, env_type=dev, instance=env-293840laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live Value:0xc01477c680} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293840laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293840, env_name=c655_booking_dev_2021, env_type=dev, instance=env-293840laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live Value:0xc01477c740} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-293840laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293840, env_name=c655_booking_dev_2021, env_type=dev, instance=env-293840laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live Value:0xc01477c840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731345377s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293840laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293840, env_name=c655_booking_dev_2021, env_type=dev, instance=env-293840laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293840laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293840, env_name=c655_booking_dev_2021, env_type=dev, instance=env-293840laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-293840laio1euw1, cloud_platform=AWS, customer_id=C655, env_id=293840, env_name=c655_booking_dev_2021, env_type=dev, instance=env-293840laio1euw1, job=integrations/node_exporter, region=eu-west-1, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477cf88} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, 
customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477cbf0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477cdb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731356493s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294036laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294036, env_name=A111 Staples DEV, env_type=dev, instance=env-294036laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-294098laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294098, env_name=A111 Staples PROD, env_type=prod, instance=env-294098laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294098laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294098, env_name=A111 Staples PROD, env_type=prod, instance=env-294098laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477d1b8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294098laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294098, env_name=A111 Staples PROD, env_type=prod, instance=env-294098laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477d258} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294098laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294098, env_name=A111 Staples PROD, env_type=prod, instance=env-294098laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477d0f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.73136615s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294098laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294098, env_name=A111 Staples PROD, env_type=prod, instance=env-294098laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294098laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294098, env_name=A111 Staples PROD, env_type=prod, instance=env-294098laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294098laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294098, env_name=A111 Staples PROD, env_type=prod, 
instance=env-294098laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477d4b8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477d578} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477d3e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731377625s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294107laio1eastus2, cloud_platform=Azure, customer_id=A111, env_id=294107, env_name=A111 Staples STG, env_type=qa, instance=env-294107laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-294680laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=294680, env_name=A165 DWH-MA TEST, env_type=test, instance=env-294680laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294680laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=294680, env_name=A165 DWH-MA TEST, env_type=test, instance=env-294680laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01477db40} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294680laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=294680, env_name=A165 DWH-MA TEST, env_type=test, instance=env-294680laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01477dd00} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294680laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=294680, env_name=A165 DWH-MA TEST, env_type=test, instance=env-294680laio1germanywestcentral, 
job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc01477ddb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731388967s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294680laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=294680, env_name=A165 DWH-MA TEST, env_type=test, instance=env-294680laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294680laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=294680, env_name=A165 DWH-MA TEST, env_type=test, instance=env-294680laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294680laio1germanywestcentral, cloud_platform=Azure, customer_id=A165, env_id=294680, env_name=A165 DWH-MA TEST, env_type=test, instance=env-294680laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-294786laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294786, env_name=A178 Customers Bank DEV, env_type=dev, instance=env-294786laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294786laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294786, env_name=A178 Customers Bank DEV, env_type=dev, instance=env-294786laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc01477dfc8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294786laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294786, env_name=A178 Customers Bank DEV, env_type=dev, instance=env-294786laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc03bbc0088} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294786laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294786, env_name=A178 Customers Bank DEV, env_type=dev, instance=env-294786laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc03bbc0138}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731403356s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294786laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294786, env_name=A178 Customers Bank DEV, env_type=dev, instance=env-294786laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294786laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294786, env_name=A178 Customers Bank DEV, env_type=dev, instance=env-294786laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294786laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294786, env_name=A178 Customers Bank DEV, env_type=dev, instance=env-294786laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 
Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc03bbc02f8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc03bbc03c0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live Value:0xc03bbc0488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731413665s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-294791laio1eastus2, cloud_platform=Azure, customer_id=A178, env_id=294791, env_name=A178 Customers Bank PROD, env_type=prod, instance=env-294791laio1eastus2, job=integrations/node_exporter, region=eastus2, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-295389laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=dev, instance=env-295389laio1northeurope, job=integrations/node_exporter, region=northeurope State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295389laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=dev, instance=env-295389laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc03bbc05b8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295389laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=dev, instance=env-295389laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc03bbc0698} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295389laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=dev, instance=env-295389laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc03bbc0738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731425177s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295389laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=dev, instance=env-295389laio1northeurope, job=integrations/node_exporter, region=northeurope} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, 
agent_hostname=env-295389laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=dev, instance=env-295389laio1northeurope, job=integrations/node_exporter, region=northeurope} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295389laio1northeurope, cloud_platform=Azure, customer_id=C435, env_type=dev, instance=env-295389laio1northeurope, job=integrations/node_exporter, region=northeurope} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc0a30} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc08b8} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc0980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731437955s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295921laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295921, env_name=A179 DmTech Partner Dev, env_type=dev, instance=env-295921laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, 
region=GermanyWestCentral, stage=live Value:0xc03bbc0be0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc0cb0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc0d60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731450206s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295922laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295922, env_name=A180 DWH-INTRANET DEV, env_type=dev, instance=env-295922laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-295926laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295926laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc0ef8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295926laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc0fc0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295926laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc1078}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731464117s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295926laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio1germanywestcentral, 
job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295926laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295926laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295926, env_name=A179 DmTech Partner Prod, env_type=prod, instance=env-295926laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-295927laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295927, env_name=A179 DmTech Partner Test, env_type=test, instance=env-295927laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295927laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295927, env_name=A179 DmTech Partner Test, env_type=test, instance=env-295927laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc11e8} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295927laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295927, env_name=A179 DmTech Partner Test, env_type=test, instance=env-295927laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc12c0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295927laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295927, env_name=A179 DmTech Partner Test, env_type=test, instance=env-295927laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc1370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731475115s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295927laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295927, env_name=A179 DmTech Partner Test, env_type=test, instance=env-295927laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295927laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295927, env_name=A179 DmTech Partner Test, env_type=test, instance=env-295927laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295927laio1germanywestcentral, cloud_platform=Azure, customer_id=A179, env_id=295927, env_name=A179 DmTech Partner Test, env_type=test, instance=env-295927laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-295928laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295928, env_name=A180 DWH-INTRANET PROD, env_type=prod, instance=env-295928laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295928laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295928, env_name=A180 DWH-INTRANET PROD, env_type=prod, instance=env-295928laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc14d0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295928laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295928, env_name=A180 DWH-INTRANET PROD, env_type=prod, instance=env-295928laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc1598} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295928laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295928, env_name=A180 DWH-INTRANET PROD, env_type=prod, instance=env-295928laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc1648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731484847s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295928laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295928, env_name=A180 DWH-INTRANET PROD, env_type=prod, instance=env-295928laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295928laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295928, env_name=A180 DWH-INTRANET PROD, env_type=prod, instance=env-295928laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295928laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295928, env_name=A180 DWH-INTRANET PROD, env_type=prod, instance=env-295928laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-295932laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295932, env_name=A180 DWH-INTRANET TEST, env_type=test, instance=env-295932laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295932laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295932, env_name=A180 DWH-INTRANET TEST, env_type=test, instance=env-295932laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc1910} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295932laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295932, env_name=A180 DWH-INTRANET TEST, env_type=test, instance=env-295932laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc1a38} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-295932laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295932, env_name=A180 DWH-INTRANET TEST, env_type=test, instance=env-295932laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live Value:0xc03bbc17d8}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:3.731496054s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295932laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295932, env_name=A180 DWH-INTRANET TEST, env_type=test, instance=env-295932laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295932laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295932, env_name=A180 DWH-INTRANET TEST, env_type=test, instance=env-295932laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-295932laio1germanywestcentral, cloud_platform=Azure, customer_id=A180, env_id=295932, env_name=A180 DWH-INTRANET TEST, env_type=test, instance=env-295932laio1germanywestcentral, job=integrations/node_exporter, region=GermanyWestCentral, stage=live} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-296678laio1northeurope, cloud_platform=Azure, customer_id=C435, env_id=296678, env_type=qa, instance=env-296678laio1northeurope, job=integrations/node_exporter, region=northeurope State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-296678laio1northeurope, cloud_platform=Azure, customer_id=C435, env_id=296678, env_type=qa, instance=env-296678laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc03bbc1ca0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-296678laio1northeurope, cloud_platform=Azure, customer_id=C435, env_id=296678, env_type=qa, instance=env-296678laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc03bbc1dc0} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-296678laio1northeurope, cloud_platform=Azure, customer_id=C435, env_id=296678, env_type=qa, instance=env-296678laio1northeurope, job=integrations/node_exporter, region=northeurope Value:0xc03bbc1e80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731507506s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-296678laio1northeurope, cloud_platform=Azure, customer_id=C435, env_id=296678, env_type=qa, instance=env-296678laio1northeurope, job=integrations/node_exporter, region=northeurope} value=1 ], [ var='B' labels={__name__=mstr_status_message_kafka, agent_hostname=env-296678laio1northeurope, cloud_platform=Azure, customer_id=C435, env_id=296678, env_type=qa, instance=env-296678laio1northeurope, job=integrations/node_exporter, region=northeurope} value=1 ], [ var='C' labels={__name__=mstr_status_message_kafka, agent_hostname=env-296678laio1northeurope, cloud_platform=Azure, customer_id=C435, env_id=296678, env_type=qa, instance=env-296678laio1northeurope, job=integrations/node_exporter, region=northeurope} value=0 ]} {Instance:__name__=mstr_status_message_kafka, agent_hostname=env-298122laiouse1, cloud_platform=AWS, customer_id=C506, env_id=298122, env_name=C506_Prod_MCE_LAST, env_type=prod, instance=env-298122laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=mstr_status_message_kafka, agent_hostname=env-298122laiouse1, cloud_platform=AWS, customer_id=C506, env_id=298122, env_name=C506_Prod_MCE_LAST, env_type=prod, 
instance=env-298122laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc03f91c1a0} B:{Var:B Labels:__name__=mstr_status_message_kafka, agent_hostname=env-298122laiouse1, cloud_platform=AWS, customer_id=C506, env_id=298122, env_name=C506_Prod_MCE_LAST, env_type=prod, instance=env-298122laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc03f91c050} C:{Var:C Labels:__name__=mstr_status_message_kafka, agent_hostname=env-298122laiouse1, cloud_platform=AWS, customer_id=C506, env_id=298122, env_name=C506_Prod_MCE_LAST, env_type=prod, instance=env-298122laiouse1, job=integrations/node_exporter, region=us-east-1, stage=live Value:0xc03f91c100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731519395s EvaluationString:[ var='A' labels={__name__=mstr_status_message_kafka, agent_hostname=env-298122laiouse1, cloud_platform
+ level=debug ts=2024-05-29T13:44:13.850154166Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myrbv5kh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850168612Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myrbv5kh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850143401Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=432323 slug=lithic instance="FunctionName=wire-transfer-replayer-live" t=2024-05-29T13:44:13.850118489Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.850082244Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=432323 slug=lithic instance="FunctionName=wire-transfer-replayer-live" t=2024-05-29T13:44:13.850107433Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myrbv5kh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.850086831Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myqp8h3w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849957579Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mymjzbea-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849751157Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mymjzbea-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849723297Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myl5w82y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849616746Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myl5w82y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849529915Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myd4s2j6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849493015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myd4s2j6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849394884Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-myd4s2j6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849335233Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=371756 slug=asapp instance="company=assurantlifestyle" t=2024-05-29T13:44:13.8492238Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mycpwhw0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849230192Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mycpwhw0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849205652Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mycpwhw0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.849137331Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.84903921Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.848734214Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=371756 slug=asapp t=2024-05-29T13:44:13.849031321Z level=debug msg="State manager processing evaluation results" resultCount=2
+ level=debug ts=2024-05-29T13:44:13.848952259Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-my6483qc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.84899293Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.848926367Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-my6483qc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848928229Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.848865548Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-my2xp2rr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848824088Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-my2xp2rr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848799178Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-my2xp2rr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848763417Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-my2nh8ki-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848704697Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.848611171Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.848547997Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxwp5a75-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848505635Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.848460493Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxvdy7ot-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics"
t=2024-05-29T13:44:13.848323463Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxvdy7ot-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848254052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxvdy7ot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.848183871Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.848063092Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxuimp6g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.84808099Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.848116011Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.84805525Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxmkyr9v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.847950889Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.847852256Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxmkyr9v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.847892868Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.847750841Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.847768739Z caller=grafana.go:247 user=289650 slug=eurostar msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=36 alerts=0 + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxbat71c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.847661456Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.847650977Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mxbat71c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.847574515Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mx65vry3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.847546785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mx65vry3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.847417523Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.847270633Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.847125834Z caller=remote_instance_store.go:51 user=851297 slug=roadrunneruat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.847021869Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.84708829Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.847027052Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=851297 slug=roadrunneruat t=2024-05-29T13:44:13.847014332Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=851297 slug=roadrunneruat version=1 fingerprint=635cdf23e424d7de attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.846946371Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.84658368s EvaluationString:}]" 
duration=32.578726ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mx0l7mur-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.847004479Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.846971323Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwwqa0ty-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846939139Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwwqa0ty-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846875598Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.846833407Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.846715526Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwp70mzo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846632915Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="ClusterName=prod, ServiceName=journal-processor-live" t=2024-05-29T13:44:13.84657251Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwmzjzpn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846530094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwmzjzpn-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846503634Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwmzjzpn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846361433Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.846293528Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.846134339Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwl4qh3p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846171761Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.846046777Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwke79w1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.846046359Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.84592354Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwk1sh0w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845945448Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.845969991Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwk1sh0w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845858637Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwk1sh0w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845819567Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.845737096Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.845581247Z caller=ruler.go:522 msg="tenant is owned by this instance" user=288067 slug=zwang20 groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mwb8e6br-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845537834Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.845548861Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=666080 slug=ztv + level=debug ts=2024-05-29T13:44:13.8454518Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.845454417Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.845502502Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.845387337Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw7k4o8p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845376322Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw7k4o8p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845350752Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.845237426Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.845229137Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.845218636Z caller=remote_instance_store.go:51 user=20177 slug=paddledash msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=20177 slug=paddledash 
instance="DBInstanceIdentifier=paddle-sandbox-import-service-1" t=2024-05-29T13:44:13.845155773Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=20177 slug=paddledash instance="DBInstanceIdentifier=paddle-sandbox-import-service-1" t=2024-05-29T13:44:13.845128221Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.845068474Z caller=ruler.go:522 msg="tenant is owned by this instance" user=399637 slug=webfox groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw6cl8qa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845066219Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=fbdfb990c21b28b2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.845017251Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.844743014s EvaluationString:}]" duration=160.218777ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw6cl8qa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.845007849Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw6cl8qa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844979498Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw44nc1p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844920998Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw44nc1p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844878687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mw44nc1p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844813757Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvydozq4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844743986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvydozq4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844717226Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvru2n9e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844519994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvru2n9e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844491903Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.844389227Z caller=ruler.go:522 msg="tenant is owned by this instance" user=465820 slug=vianettest groups=2 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvru2n9e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844432653Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvjlgyyk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.84420278Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvhf17w6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.844084019Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.844067383Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.844045804Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.843505725Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.843963622Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvhf17w6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.843919748Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.843887643Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvhf17w6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.843880157Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.843596824Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvfs6vbv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.843689045Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.843496469Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.843401249Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mvdpn3s0-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.843396732Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.843386767Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=556147 slug=bettercloudholding t=2024-05-29T13:44:13.843355376Z level=debug msg="Saving alert states done" count=39 max_state_save_concurrency=1 duration=841.267962ms + level=debug ts=2024-05-29T13:44:13.843242407Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.843029011Z caller=remote_alert_sender.go:94 user=456850 slug=juniz host=juniz-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.90.174:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cdmd7156unkzke alerts=1 + level=warn ts=2024-05-29T13:44:13.84310908Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=543460 slug=waikato + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-muuqcaga-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.843043109Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-murkh6xl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842860087Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-murkh6xl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842830676Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.842706467Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.842639405Z caller=remote_alert_sender.go:94 user=770248 slug=aurora host=aurora-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.59.186:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddlxicd8wvls0b alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-munp7124-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842607834Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager.persist user=770248 slug=aurora t=2024-05-29T13:44:13.842569712Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.473896ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mumje4kx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842522303Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mumje4kx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842479913Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.842394104Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.842355037Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.842299535Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mul9ocwg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842330211Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mul9ocwg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.84218684Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mugazixr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842118719Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mugazixr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.842063268Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mufih0a8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841898337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mufih0a8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841870676Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mufih0a8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841795036Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.841764246Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:13.841672065Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.882572ms + level=debug ts=2024-05-29T13:44:13.841693034Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-muaqj8fv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841686455Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.841633205Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=82.55374ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-muaqj8fv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841592574Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.841472438Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-muap5120-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841551513Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-muap5120-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841495243Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-muap5120-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841485192Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-muap5120-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841455742Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu9dpgwu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841369331Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu9dpgwu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.84127214Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu9dpgwu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.84124304Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu7qd3u3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841200319Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu7qd3u3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841171779Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu7qd3u3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841134769Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu7qd3u3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841100568Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.841139828Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu7qd3u3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.841056348Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.840954247Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.841019454Z caller=ruler.go:522 msg="tenant is owned by this instance" user=649444 slug=vidwalk groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu748lrp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840957767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu748lrp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840875856Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.840859317Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu4qtjmo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840751395Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-dub,5)) Query" t=2024-05-29T13:44:13.840785416Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-dub,5)) Query" t=2024-05-29T13:44:13.840768496Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mu4qtjmo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840663074Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=35b5cc3bed3e7eb4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.840543013Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-dub,5)) Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc062c13190} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc062c13198} Threshold:{Var:Threshold Labels: Value:0xc062c131e0} compare:{Var:compare Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-dub,5)) Query Value:0xc062c13090} sum:{Var:sum Labels:aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-dub,5)) Query Value:0xc062c13158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.840250789s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=1000 ], [ var='Threshold' labels={} value=20 ], [ var='compare' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-dub,5)) Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=sumSeries(keepLastValue(eadp.gos.blaze.prod.nuts-1-common.coreMaster*.gamemanager_master.status.GMGauge_ACTIVE_PLAYERS_CLIENT_SERVER_PEER_HOSTED_*-dub,5)) Query} value=0 ]}]" duration=55.118668ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mtr9iybi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840570383Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.840434609Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mthcfucf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840419571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mthcfucf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840352911Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:13.840357431Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mtgsys17-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.8402618Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.84026798Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:13.840233045Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A" t=2024-05-29T13:44:13.840219757Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A" t=2024-05-29T13:44:13.84020131Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A" t=2024-05-29T13:44:13.840172785Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mtgsys17-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840144969Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mt63cy3i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.840105858Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=230713 slug=flocksafety instance="datasource_uid=grafanacloud-prom, ref_id=D" t=2024-05-29T13:44:13.840151381Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=230713 slug=flocksafety t=2024-05-29T13:44:13.840113218Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:13.840078937Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.839874671Z caller=ruler.go:522 msg="tenant is owned by this instance" user=557953 slug=wspr groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mt2vfgdz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839871866Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.839773695Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mt2vfgdz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839808865Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.839760363Z caller=remote_instance_store.go:51 user=441901 slug=openmarkets msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.839771083Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mt2vfgdz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839781495Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.839711336Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-msx8lysq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839727354Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.839537516Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.839481388Z caller=ruler.go:522 msg="tenant is owned by this instance" user=411704 slug=xoss groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-msrsivc2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839515572Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-msrsivc2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839457411Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:13.839395358Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.358582ms
+ logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.839251685Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.839194957Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-msnzqmiz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839182959Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mskbp6ia-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839118978Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.839133486Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mskbp6ia-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839038637Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=698963 slug=lemonade version=4 fingerprint=5c490f6acb61f754 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.838995855Z level=debug msg="Alert rule evaluated" results="[{Instance:app=billing-cron-worker, pod=billing-cron-worker-7c4759d8f-v8nbn State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=billing-cron-worker, pod=billing-cron-worker-7c4759d8f-v8nbn Value:0xc010a747a8} THRESHOLD:{Var:THRESHOLD Labels:app=billing-cron-worker, pod=billing-cron-worker-7c4759d8f-v8nbn Value:0xc010a74b70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.83853432s EvaluationString:[ var='QUERY' labels={app=billing-cron-worker, pod=billing-cron-worker-7c4759d8f-v8nbn} value=0 ], [ var='THRESHOLD' labels={app=billing-cron-worker, pod=billing-cron-worker-7c4759d8f-v8nbn} value=0 ]}]" duration=26.722035ms
+ level=warn ts=2024-05-29T13:44:13.839102083Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=389188 slug=tlwau
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mskbp6ia-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.839015937Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-msiyd5hp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838920346Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-msiyd5hp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838743304Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.838796906Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.838814589Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.838622693Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-msiyd5hp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838718184Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.838387837Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.838572189Z caller=ruler.go:522 msg="tenant is owned by this instance" user=448205 slug=weeb groups=1
+ level=debug ts=2024-05-29T13:44:13.838457052Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=851297 slug=roadrunneruat t=2024-05-29T13:44:13.838380595Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=21.396834ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms8qsvty-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838476121Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.838356002Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms67x0a3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838210399Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms67x0a3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838131458Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.838066393Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms4g58cb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838041767Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms4g58cb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.838010256Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.837837575Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.838029264Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=525161 slug=wolfpacknz
+ logger=ngalert.state.manager.persist user=788474 slug=elisasre t=2024-05-29T13:44:13.837944532Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.859633ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms1x34a3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837822165Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms0z2c7n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837750914Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms0z2c7n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837721424Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ms0z2c7n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837678593Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.837603398Z caller=remote_instance_store.go:51 user=633335 slug=promqlworkshop msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.837498935Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=633335 slug=promqlworkshop t=2024-05-29T13:44:13.837553899Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrybuxzb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837540842Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrybuxzb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837498211Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrybuxzb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837456821Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.837484523Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrybuxzb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837429561Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:13.837456394Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrogdr9t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.83736646Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrogdr9t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837276139Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrogdr9t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837246009Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mrnp3946-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.837169418Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.836987535Z caller=remote_instance_store.go:51 user=697672 slug=yrpc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.836914274Z caller=remote_image_capturer.go:33 user=697672 slug=yrpc rule_org_id=1 rule_uid=ee3c38bf-0e3f-4114-aea5-489b20123063 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ level=debug ts=2024-05-29T13:44:13.836822397Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.836848264Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.83685809Z caller=remote_alert_sender.go:94 user=306551 slug=teckresourcesalerts host=teckresourcesalerts-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.157.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=YsdAPNHVk alerts=1
+ logger=ngalert.state.manager user=697672 slug=yrpc instance= t=2024-05-29T13:44:13.836815543Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=697672 slug=yrpc instance= t=2024-05-29T13:44:13.836803823Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.836762871Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=697672 slug=yrpc version=80 fingerprint=2815a5d03a75af48 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.836707112Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.836299134s EvaluationString:}]" duration=406.404739ms
+ level=debug ts=2024-05-29T13:44:13.836553497Z caller=remote_instance_store.go:51 user=633335 slug=promqlworkshop msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.836497159Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=563111 slug=zeroflucs
+ logger=ngalert.state.manager.persist user=633335 slug=promqlworkshop t=2024-05-29T13:44:13.836516744Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.836500884Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=info ts=2024-05-29T13:44:13.836423278Z caller=remote_alert_sender.go:94 user=548157 slug=kushkiprod host=kushkiprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.101.51:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=c037550a-6ff9-4e6c-bdeb-53763ca06925 alerts=1
+ level=debug ts=2024-05-29T13:44:13.836363706Z caller=ruler.go:522 msg="tenant is owned by this instance" user=419591 slug=truehybridenergy groups=0
+ logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:13.836314557Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.530948ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mre46htf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.836330779Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mre46htf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.836280169Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mre46htf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.836234698Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.836117079Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mr9a89s7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.836188898Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mr9a89s7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.836068967Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mr9a89s7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.836013656Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mr3cem1b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835880235Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mr3cem1b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835832754Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.836048796Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mr2b7l5w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835804204Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mr2b7l5w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835755423Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqx953xb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835678463Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.835862643Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=539031 slug=chathamtechprd t=2024-05-29T13:44:13.835942643Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqx953xb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835606992Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqwqu906-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835562151Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqwqu906-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835507101Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqwqu906-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835493721Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.835766606Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqwqu906-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.83545935Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqwqu906-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.83543293Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=108112 slug=btctrader t=2024-05-29T13:44:13.835656765Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.698525ms
+ level=debug ts=2024-05-29T13:44:13.835525655Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.835510667Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.835488575Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=245291 slug=pismo version=32 fingerprint=e4c11a49f7331fa2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.835375664Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.835149339s EvaluationString:}]" duration=159.349132ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqtehjbj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835325659Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.835224374Z caller=remote_instance_store.go:51 user=890273 slug=cmhusqnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.835234636Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.835215552Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqqq3r3k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835160487Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=seEOTff4k, ref_id=A" t=2024-05-29T13:44:13.835179928Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.835114807Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqqq3r3k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835115747Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=538037 slug=drivewealth version=69 fingerprint=edeaa2600183f0ad attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.835107252Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=seEOTff4k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.83485421s EvaluationString:}]" duration=31.656878ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqqq3r3k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835038276Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqqq3r3k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.835009676Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqiq4p08-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834917185Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqiq4p08-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834887534Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqftd5gj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834748043Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=755975 slug=franprd t=2024-05-29T13:44:13.834728522Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.426342ms
+ level=debug ts=2024-05-29T13:44:13.834781466Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.834774893Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=698122 slug=michaelkors instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.834704842Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698122 slug=michaelkors instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.834688051Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=698122 slug=michaelkors t=2024-05-29T13:44:13.834670412Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=698122 slug=michaelkors version=1 fingerprint=4a20823276b42822 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.83457824Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.83442399s EvaluationString:}]" duration=6.190445ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqftd5gj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834585501Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqa4svto-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834538111Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=797387 slug=roadrunnerdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.834370306Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.834402326Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=797387 slug=roadrunnerdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.834336996Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mqa4svto-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.8344036Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mq92f81o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834287348Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.834196772Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.834139308Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.392214ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mq5s9bzs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834159977Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.834035632Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.931511ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mq5s9bzs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.834013206Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mq29agei-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.833864664Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.833779535Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.833699184Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.762246ms
+ level=debug ts=2024-05-29T13:44:13.833699839Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mq00t0jv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.833681432Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_utilization_mean_mean" t=2024-05-29T13:44:13.833717621Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:13.833673159Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mq00t0jv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.833559571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=548166 slug=mapcom1 t=2024-05-29T13:44:13.83337311Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpvpxols-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.83342769Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpvpxols-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.833400919Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpvpxols-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.833362379Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.8333992Z caller=ruler.go:522 msg="tenant is owned by this instance" user=313778 slug=vfs groups=0
+ level=debug ts=2024-05-29T13:44:13.833310531Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpn5dcjv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.833177247Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpfowwjr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.833030885Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpfowwjr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832957685Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.833189101Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpfowwjr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832923764Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID1333dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=548166 slug=mapcom1 version=12 fingerprint=b60f3fa13ff3baf3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.833030379Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=netapp_volume_used_percentage, availability_zone=qts, filer=dc1nas1, instance=localhost:9108, job=netapp, volume=ERPDS, volume_type=rw, vserver=ORACLE_KVM State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=netapp_volume_used_percentage, availability_zone=qts, filer=dc1nas1, instance=localhost:9108, job=netapp, volume=ERPDS, volume_type=rw, vserver=ORACLE_KVM Value:0xc0a04fe998} B:{Var:B Labels:__name__=netapp_volume_used_percentage, availability_zone=qts, filer=dc1nas1, instance=localhost:9108, job=netapp, volume=ERPDS, volume_type=rw, vserver=ORACLE_KVM Value:0xc0a04fea18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.832650888s EvaluationString:[ var='A' labels={__name__=netapp_volume_used_percentage, availability_zone=qts, filer=dc1nas1, instance=localhost:9108, job=netapp, volume=ERPDS, volume_type=rw, vserver=ORACLE_KVM} value=28 ], [ var='B' labels={__name__=netapp_volume_used_percentage, availability_zone=qts, filer=dc1nas1, instance=localhost:9108, job=netapp, volume=ERPDS, volume_type=rw, vserver=ORACLE_KVM} value=0 ]}]" duration=38.393364ms
+ level=debug ts=2024-05-29T13:44:13.832920519Z caller=ruler.go:522 msg="tenant is owned by this instance" user=298123 slug=ykp groups=3
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mpeulc9u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832827353Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mp6r0drf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832710772Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mp6r0drf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832569771Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:13.832540128Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.338734ms
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="error-type=runtimeException" t=2024-05-29T13:44:13.83197626Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="error-type=processingException" t=2024-05-29T13:44:13.831828846Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mp61f5ne-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832395139Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mp4pqyrr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832250887Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mp4pqyrr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.832209227Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.831804598Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mp1b8nhw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831911634Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moyz6iit-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831840293Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.831808436Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="error-type=newJourneyNotFound" t=2024-05-29T13:44:13.831728211Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moxc4g2e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831656581Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moxc4g2e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831622171Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=177465 slug=fairtiq instance="error-type=journeyProcessingException" t=2024-05-29T13:44:13.831627518Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moxc4g2e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831580471Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moxc4g2e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.8315552Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/steps/{proxy+}, Stage=--" t=2024-05-29T13:44:13.831509818Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moxc4g2e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.83151484Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/steps/{proxy+}, Stage=--" t=2024-05-29T13:44:13.831500908Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mougvs8m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831392779Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.831369633Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mougvs8m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831377008Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug component=discovery ts=2024-05-29T13:44:13.83128829Z caller=retry.go:58 user=467639 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=2
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=fd9a0652-b4d5-4c7e-aac2-49c009b62c31, ref_id=A" t=2024-05-29T13:44:13.831327682Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=fd9a0652-b4d5-4c7e-aac2-49c009b62c31, ref_id=A" t=2024-05-29T13:44:13.831315083Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.831231641Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.831282336Z caller=remote_instance_store.go:51 user=441627 slug=foreststaking msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=fd9a0652-b4d5-4c7e-aac2-49c009b62c31, ref_id=A" t=2024-05-29T13:44:13.831238573Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.831165109Z caller=remote_instance_store.go:51 user=770248 slug=aurora msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=441627 slug=foreststaking instance= t=2024-05-29T13:44:13.831169961Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=441627 slug=foreststaking t=2024-05-29T13:44:13.83115128Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=70430 slug=dapperlabs version=3 fingerprint=ba867919fd6a23e6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.831111079Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fd9a0652-b4d5-4c7e-aac2-49c009b62c31, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.830803204s EvaluationString:}]" duration=46.403264ms
+ logger=ngalert.scheduler user=441627 slug=foreststaking version=22 fingerprint=fd28e2bbf674dbf2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.831104316Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=21.65822ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moq2g9ip-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831069655Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.831132149Z
caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.831102442Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moq2g9ip-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831043685Z level=debug msg="Setting next state" handler=resultNormal + level=error ts=2024-05-29T13:44:13.831067597Z caller=remote_rule_evaluator.go:110 user=441627 slug=foreststaking msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moq2g9ip-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.831001965Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.83094267Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-moq2g9ip-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.830909434Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.830829032Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mol4u64o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.830697902Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.83072827Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mo9aw09h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.830445959Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.830533417Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling 
SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.830443877Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.830437889Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=541013 slug=whitecomputing + logger=ngalert.state.manager user=177465 slug=fairtiq instance="error-type=existingJourneyNotFound" t=2024-05-29T13:44:13.830400897Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=177465 slug=fairtiq instance="error-type=existingJourneyNotFound" t=2024-05-29T13:44:13.830390751Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.830355972Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mo98bhqc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.830339098Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.830422735Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=286924 slug=kmpdashboard instance= t=2024-05-29T13:44:13.830314799Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=286924 slug=kmpdashboard instance= t=2024-05-29T13:44:13.830273653Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mo6ybmtu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.830184336Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.829974786Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mo6ybmtu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.830146226Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.830119454Z caller=remote_instance_store.go:51 user=735589 slug=sremarek msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.830131666Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=735589 slug=sremarek instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.829989872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=735589 slug=sremarek t=2024-05-29T13:44:13.829949351Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.829977867Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.829959277Z caller=ruler.go:522 msg="tenant is owned by this instance" user=354447 slug=xenopos groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mo1cx67q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.829492039Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mo1cx67q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.829395038Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475641 slug=mtkg instance="__name__=water_probe_temperature, __proxy_source__=influx, device_type=water_probe, ipaddr=192.168.0.73, topic=192.168.0.73" t=2024-05-29T13:44:13.829141093Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475641 slug=mtkg instance="__name__=water_probe_temperature, __proxy_source__=influx, device_type=water_probe, ipaddr=192.168.0.73, topic=192.168.0.73" t=2024-05-29T13:44:13.829126511Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.829179213Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.51.155:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adlsez75ruzuof alerts=1 + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.829013596Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.829006055Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.828936145Z caller=ruler.go:522 msg="tenant is owned by this instance" user=323441 slug=wangwill groups=0 + logger=ngalert.scheduler user=679029 slug=joveoprodaws version=3345 fingerprint=5f10f515d2a5f6c8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.828929852Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.828481784s EvaluationString:}]" duration=14.914211ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-mnzol3ul-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828945734Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=829352 slug=unfnbonp instance="namespace=fnbo-np-uni, pod=nginx-ingress-nginx-controller-766fd5d669-xpggd" t=2024-05-29T13:44:13.82890315Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=829352 slug=unfnbonp instance="namespace=fnbo-np-uni, pod=listener-0" t=2024-05-29T13:44:13.828874699Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=829352 slug=unfnbonp instance="namespace=fnbo-np-uni, pod=hcl-unica-platform-5df7fbbfb9-lrb77" t=2024-05-29T13:44:13.828850108Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnzol3ul-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828840232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnzol3ul-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828788352Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=829352 slug=unfnbonp instance="namespace=fnbo-np-uni, pod=hcl-unica-campaign-76998c6f86-5wc64" t=2024-05-29T13:44:13.828775854Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=456850 slug=juniz t=2024-05-29T13:44:13.828660351Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnzol3ul-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828752772Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnwvw3dr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828704251Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-mnwvw3dr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828669511Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.827701948Z caller=remote_image_capturer.go:54 user=456850 slug=juniz rule_org_id=1 rule_uid=cdmd7156unkzke dashboard=d14ae2cd-5d03-4193-8362-2b4d88734375 panel=5 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnwvw3dr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.82857248Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.828471395Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.828512331Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnwvw3dr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828520849Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.828403797Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnorhv82-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828370328Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnorhv82-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828355518Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnm7b6co-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828211786Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mnm7b6co-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.828169526Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.828014011Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.827949541Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:13.827714046Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=33.516126ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn8xqbd9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.82762822Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:13.82762533Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:13.827612223Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.827559007Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn8xqbd9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827525199Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.82750356Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn8twcou-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827474368Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=659224 slug=credivera version=23 fingerprint=e7b01a6d2c9f40f9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.827395738Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A 
State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.827088256s EvaluationString:}]" duration=8.521345ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn8twcou-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827435018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn8twcou-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827407308Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn6h6uz9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827230296Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn6h6uz9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827195086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn4ea9t1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827060604Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn4ea9t1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.827000614Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.827230379Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-mn3h45wk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826919233Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn3h45wk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826871672Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.827172467Z caller=ruler.go:522 msg="tenant is owned by this instance" user=354885 slug=vbao groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mn3h45wk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826856292Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.827126871Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=283072 slug=willhall14 + level=debug ts=2024-05-29T13:44:13.827040739Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmzwr3kr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826732781Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=users-edge, pod=users-edge-bbb5bff86-sl7h4" t=2024-05-29T13:44:13.827113246Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmzwr3kr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.82668987Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmycfc2d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.82662547Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmycfc2d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826591019Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.826937638Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmycfc2d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826535939Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmsuaush-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826505269Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmsuaush-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826494448Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmsuaush-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826462558Z level=debug msg="Keeping state" state=Normal + Error parsing panelUID for alert annotationruleID787dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=698963 slug=lemonade version=4 fingerprint=fa9e3e1b96288fa6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.826912311Z level=debug msg="Alert rule evaluated" results="[{Instance:app=users-edge, pod=users-edge-bbb5bff86-8t6jb State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=users-edge, pod=users-edge-bbb5bff86-8t6jb Value:0xc0656e9838} THRESHOLD:{Var:THRESHOLD Labels:app=users-edge, pod=users-edge-bbb5bff86-8t6jb Value:0xc0656e9870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.826531586s EvaluationString:[ var='QUERY' labels={app=users-edge, pod=users-edge-bbb5bff86-8t6jb} value=0 ], [ var='THRESHOLD' labels={app=users-edge, pod=users-edge-bbb5bff86-8t6jb} value=0 ]} {Instance:app=users-edge, pod=users-edge-bbb5bff86-sl7h4 State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY 
Labels:app=users-edge, pod=users-edge-bbb5bff86-sl7h4 Value:0xc0656e98b0} THRESHOLD:{Var:THRESHOLD Labels:app=users-edge, pod=users-edge-bbb5bff86-sl7h4 Value:0xc0656e98f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.826540466s EvaluationString:[ var='QUERY' labels={app=users-edge, pod=users-edge-bbb5bff86-sl7h4} value=0 ], [ var='THRESHOLD' labels={app=users-edge, pod=users-edge-bbb5bff86-sl7h4} value=0 ]} {Instance:app=users-edge, pod=users-edge-bbb5bff86-tfvtn State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=users-edge, pod=users-edge-bbb5bff86-tfvtn Value:0xc0656e9938} THRESHOLD:{Var:THRESHOLD Labels:app=users-edge, pod=users-edge-bbb5bff86-tfvtn Value:0xc0656e9970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.826544507s EvaluationString:[ var='QUERY' labels={app=users-edge, pod=users-edge-bbb5bff86-tfvtn} value=0 ], [ var='THRESHOLD' labels={app=users-edge, pod=users-edge-bbb5bff86-tfvtn} value=0 ]}]" duration=44.777843ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmsuaush-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826451288Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmsuaush-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826404588Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmsmrsd5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826370137Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmsmrsd5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826323667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmi6olrw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826236256Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmi6olrw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826225586Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mmi6olrw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.826192845Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.826842409Z caller=ruler.go:522 msg="tenant is owned by this instance" user=378369 slug=wpms groups=0 + level=debug ts=2024-05-29T13:44:13.826809463Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mm7i4619-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825925583Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.826764162Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.826754154Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=163513 slug=dialpad t=2024-05-29T13:44:13.826738751Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.826693086Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=242310 slug=suzy t=2024-05-29T13:44:13.826599843Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=58.283827ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mm6faxwp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825824412Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.826581673Z caller=remote_instance_store.go:51 user=851297 slug=roadrunneruat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mm44c0gn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.82564476Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mm1k227j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825565529Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mm1k227j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825507168Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mlmq6jsd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825422527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=713314 slug=tpceunonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.826494172Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=713314 slug=tpceunonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.826480881Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mlmq6jsd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825375937Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mlmq6jsd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825321286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-mlm7gif0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825285276Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mlm7gif0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825270376Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.826027331Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.825934284Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.825842415Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mlm7gif0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825225875Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mlm7gif0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.825192655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mlhkdpgr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824757351Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ml5wyqk6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824629529Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ml5wyqk6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824612399Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ml5wyqk6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824557739Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ml5e9p3e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824447507Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ml1pzrz0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824311886Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ml1pzrz0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824255685Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ml1pzrz0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824240435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mkkwr6h2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824185725Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=735588 slug=srepradnya t=2024-05-29T13:44:13.825435193Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.358644ms + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mkjwhmz1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.824030763Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mkjwhmz1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.823992783Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.825471813Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:13.825395877Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=489921 slug=statuscake t=2024-05-29T13:44:13.825344225Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mk6cwmte-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.823603939Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mjrozqlx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.823564248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mjrozqlx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.823549678Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mjb725ix-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.823161854Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mjb725ix-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.823088864Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mjavdhmp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822998133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mjavdhmp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822928412Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mj8oxdgf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822884011Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mj8oxdgf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822826941Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.825076805Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.825066998Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.825046718Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mj6txisc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822551948Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miwxei2k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822510008Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miwxei2k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822493537Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miwxei2k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822456207Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miwxei2k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822440867Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.824850093Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-misyfxp5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822116883Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-misyfxp5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.822039293Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.824812422Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=23031f2228955715 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.824690557Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.824382586s EvaluationString:}]" duration=138.141224ms + level=debug ts=2024-05-29T13:44:13.824817582Z caller=remote_instance_store.go:51 user=237629 slug=ocrolus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mipbqu0a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821716339Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:13.824781244Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mipbqu0a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821687599Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=237629 slug=ocrolus instance= t=2024-05-29T13:44:13.824765518Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mij6564n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821457907Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mij6564n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821392256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mij6564n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821366566Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:13.823896145Z level=debug msg="Saving alert states" count=9 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miivwm0z-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821310655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-e8e83d3a07f64a6d, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:13.82388326Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miivwm0z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821287605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-e8e83d3a07f64a6d, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:13.823873018Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miivwm0z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821235224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miivwm0z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821223484Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miivwm0z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821160254Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-d3fde6aefb9c4e20, persistentvolumeclaim=data-rabbitmq-0" t=2024-05-29T13:44:13.823837928Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-d0d7c8d3abe84be8, persistentvolumeclaim=main-repo1" t=2024-05-29T13:44:13.823813702Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mie30fy9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.821026052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mie30fy9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820968152Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-989a3329f2ab4eeb, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:13.823746408Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.824527139Z caller=client.go:80 msg="creating client for grafana instance" user=288067 addr=dns:///zwang20-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-6e861c49d5e74f31, persistentvolumeclaim=data-redpanda-0" t=2024-05-29T13:44:13.82368071Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-micxezfq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820891841Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-6a280dab920c49d4, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:13.823660176Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.823592269Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.824412405Z caller=client.go:80 msg="creating client for grafana instance" user=666080 addr=dns:///ztv-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-3dc24d48952f4dc1, persistentvolumeclaim=data-prometheus-0" t=2024-05-29T13:44:13.823626626Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-0cb83ae0660542cc, persistentvolumeclaim=main-main-95wp-pgdata" t=2024-05-29T13:44:13.823575395Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:13.823524615Z level=debug msg="State manager processing evaluation results" resultCount=9 + level=warn ts=2024-05-29T13:44:13.824373168Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=689358 slug=tisc + level=debug ts=2024-05-29T13:44:13.822574154Z caller=ruler.go:522 msg="tenant is owned by this instance" user=689358 slug=tisc groups=0 + logger=ngalert.state.manager.persist user=407315 slug=ppcp t=2024-05-29T13:44:13.824238461Z level=debug msg="Saving alert states done" count=3 
max_state_save_concurrency=1 duration=66.283418ms + level=debug ts=2024-05-29T13:44:13.824080285Z caller=remote_instance_store.go:51 user=451750 slug=amadeuspfpprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.824128606Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.824058512Z caller=client.go:80 msg="creating client for grafana instance" user=563111 addr=dns:///zeroflucs-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.823935955Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.82201938Z caller=ruler.go:522 msg="tenant is owned by this instance" user=392355 slug=titanclass groups=0 + level=debug component=discovery ts=2024-05-29T13:44:13.823795852Z caller=retry.go:58 user=526479 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=2 + level=warn ts=2024-05-29T13:44:13.823835715Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=318074 slug=undef + level=debug ts=2024-05-29T13:44:13.823587152Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=RBZj4Ak4z, ref_id=A" t=2024-05-29T13:44:13.823503235Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=70430 slug=dapperlabs t=2024-05-29T13:44:13.823446802Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.82337102Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=e88914a3-d4ff-4bed-b456-44713182c10d, ref_id=A" t=2024-05-29T13:44:13.823346965Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:13.823310547Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.82326625Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.823115731Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="datasource_uid=i12oD1b7k, ref_id=A" t=2024-05-29T13:44:13.823145066Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.822925535Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=729654 slug=bmsmonitoring instance="datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s" t=2024-05-29T13:44:13.822751626Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:13.822781159Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.822771512Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData + logger=ngalert.state.manager user=729654 slug=bmsmonitoring instance="datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s" t=2024-05-29T13:44:13.822714304Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=729654 slug=bmsmonitoring instance="datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s" t=2024-05-29T13:44:13.822700234Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=729654 slug=bmsmonitoring instance="datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s" t=2024-05-29T13:44:13.822669094Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.822733901Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=729654 slug=bmsmonitoring instance="datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s" t=2024-05-29T13:44:13.822663964Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=548157 slug=kushkiprod version=132 fingerprint=527d4b20d24fd718 attempt=1 now=2024-05-29T13:44:00Z t=2024-05-29T13:44:13.822633607Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:00 +0000 UTC EvaluationDuration:13.822092574s EvaluationString:}]" duration=4.205704384s + logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.82272131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.82267927Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.822664819Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.historian backend=loki user=765907 slug=orangebarrelmedia t=2024-05-29T13:44:13.822669565Z level=debug msg="Done saving alert state history batch" + logger=ngalert.state.manager user=729654 slug=bmsmonitoring instance="datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s" t=2024-05-29T13:44:13.822586022Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=NY2VLXUAT002.telegraf_service A" t=2024-05-29T13:44:13.822552616Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=729654 slug=bmsmonitoring instance="datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s" t=2024-05-29T13:44:13.822567661Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=NY2VLXPROD016.telegraf_service A" t=2024-05-29T13:44:13.822483857Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=729654 slug=bmsmonitoring version=27 fingerprint=73cb2f0691023380 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.822348387Z level=debug msg="Alert 
rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=p95 is greater than 5s State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.821835309s EvaluationString:}]" duration=9.731954ms + logger=ngalert.state.manager user=755975 slug=franprd instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.822289411Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=NY2VLXPROD014.telegraf_service A" t=2024-05-29T13:44:13.822421917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=755975 slug=franprd t=2024-05-29T13:44:13.822257239Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=755975 slug=franprd version=1 fingerprint=2c878a16d8ae8ed5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.822187738Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.822001687s EvaluationString:}]" duration=19.413122ms + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=NY2VLXPROD007.telegraf_service A" t=2024-05-29T13:44:13.822289447Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=NY2VLXPROD001.telegraf_service A" t=2024-05-29T13:44:13.822214577Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27014 slug=baseline instance= t=2024-05-29T13:44:13.822151639Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=NY2VLXDEV001.telegraf_service A" t=2024-05-29T13:44:13.822181093Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=LO7VLXPROD107.telegraf_service A" t=2024-05-29T13:44:13.822138413Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=LO7VLXPROD004.telegraf_service A" t=2024-05-29T13:44:13.822081205Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=LO7VLXPROD001.telegraf_service A" t=2024-05-29T13:44:13.822062385Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=158536 slug=clearsaleantifraude instance="aggregatedBy=sum, name= QueryAlertaTodos, summarize=1min, summarizeFunction=sum" t=2024-05-29T13:44:13.822027309Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.821966792Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXUAT004.telegraf_service A" t=2024-05-29T13:44:13.82195517Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXUAT003.telegraf_service A" t=2024-05-29T13:44:13.821937097Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXUAT002.telegraf_service A" t=2024-05-29T13:44:13.821911332Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXSTG006.telegraf_service A" t=2024-05-29T13:44:13.821891982Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.821886081Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXSTG002.telegraf_service A" t=2024-05-29T13:44:13.821877731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD050.telegraf_service A" t=2024-05-29T13:44:13.821841397Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.821786594Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD048.telegraf_service A" t=2024-05-29T13:44:13.821800589Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD035.telegraf_service A" t=2024-05-29T13:44:13.821707251Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD023.telegraf_service A" t=2024-05-29T13:44:13.821587127Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.821574606Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD021.telegraf_service A" t=2024-05-29T13:44:13.821567334Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD019.telegraf_service A" t=2024-05-29T13:44:13.821546464Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD017.telegraf_service A" t=2024-05-29T13:44:13.821531356Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD013.telegraf_service A" t=2024-05-29T13:44:13.821513593Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD012.telegraf_service A" t=2024-05-29T13:44:13.821495024Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD011.telegraf_service A" t=2024-05-29T13:44:13.821475975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD007.telegraf_service A" t=2024-05-29T13:44:13.821403297Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD005.telegraf_service A" t=2024-05-29T13:44:13.821380561Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.818638304Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD003.telegraf_service A" t=2024-05-29T13:44:13.821350688Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXPROD002.telegraf_service A" t=2024-05-29T13:44:13.821327607Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1VLXDEV005.telegraf_service A" t=2024-05-29T13:44:13.821297908Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1LXPROD005.telegraf_service A" t=2024-05-29T13:44:13.821251605Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=CH1LXPROD002.telegraf_service A" t=2024-05-29T13:44:13.821161317Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=AWSLXSTG100.telegraf_service A" t=2024-05-29T13:44:13.821101861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=AWSLXSTG001.telegraf_service A" t=2024-05-29T13:44:13.821040843Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=27737 slug=edfmancapital instance="name=AWSLXPROD002.telegraf_service A" t=2024-05-29T13:44:13.821014005Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=828268 slug=alsy2600 t=2024-05-29T13:44:13.820710241Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=27737 slug=edfmancapital version=1 fingerprint=654a72fe29d7e086 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.820250161Z level=debug msg="Alert rule evaluated" results="[{Instance:name=AWSLXPROD002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=AWSLXPROD002.telegraf_service A Value:0xc00a85a8e0} C:{Var:C Labels:name=AWSLXPROD002.telegraf_service A Value:0xc00a85a8f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818801786s EvaluationString:[ var='B' labels={name=AWSLXPROD002.telegraf_service A} value=359 ], [ var='C' labels={name=AWSLXPROD002.telegraf_service A} value=0 ]} {Instance:name=AWSLXSTG001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=AWSLXSTG001.telegraf_service A Value:0xc00a85a910} C:{Var:C Labels:name=AWSLXSTG001.telegraf_service A Value:0xc00a85a920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818818539s EvaluationString:[ var='B' labels={name=AWSLXSTG001.telegraf_service A} value=360 ], [ var='C' labels={name=AWSLXSTG001.telegraf_service A} value=0 ]} {Instance:name=AWSLXSTG100.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=AWSLXSTG100.telegraf_service A Value:0xc00a85a940} C:{Var:C Labels:name=AWSLXSTG100.telegraf_service A Value:0xc00a85a950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818822566s EvaluationString:[ var='B' labels={name=AWSLXSTG100.telegraf_service A} value=359 ], [ var='C' labels={name=AWSLXSTG100.telegraf_service A} value=0 ]} {Instance:name=AWSLXUAT002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=AWSLXUAT002.telegraf_service A Value:0xc00a85a970} C:{Var:C Labels:name=AWSLXUAT002.telegraf_service A Value:0xc00a85a980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818825559s EvaluationString:[ var='B' labels={name=AWSLXUAT002.telegraf_service A} value=359 ], [ var='C' 
labels={name=AWSLXUAT002.telegraf_service A} value=0 ]} {Instance:name=CH1LXPROD002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1LXPROD002.telegraf_service A Value:0xc00a85a9b0} C:{Var:C Labels:name=CH1LXPROD002.telegraf_service A Value:0xc00a85a9a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818827889s EvaluationString:[ var='B' labels={name=CH1LXPROD002.telegraf_service A} value=360 ], [ var='C' labels={name=CH1LXPROD002.telegraf_service A} value=0 ]} {Instance:name=CH1LXPROD005.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1LXPROD005.telegraf_service A Value:0xc00a85a9d0} C:{Var:C Labels:name=CH1LXPROD005.telegraf_service A Value:0xc00a85a9e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818831102s EvaluationString:[ var='B' labels={name=CH1LXPROD005.telegraf_service A} value=359 ], [ var='C' labels={name=CH1LXPROD005.telegraf_service A} value=0 ]} {Instance:name=CH1LXPROD006.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1LXPROD006.telegraf_service A Value:0xc00a85aa10} C:{Var:C Labels:name=CH1LXPROD006.telegraf_service A Value:0xc00a85aa30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818833467s EvaluationString:[ var='B' labels={name=CH1LXPROD006.telegraf_service A} value=360 ], [ var='C' labels={name=CH1LXPROD006.telegraf_service A} value=0 ]} {Instance:name=CH1VLXDEV005.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXDEV005.telegraf_service A Value:0xc00a85aa50} C:{Var:C Labels:name=CH1VLXDEV005.telegraf_service A Value:0xc00a85aa60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818835819s EvaluationString:[ var='B' labels={name=CH1VLXDEV005.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXDEV005.telegraf_service A} value=0 ]} {Instance:name=CH1VLXDEV007.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXDEV007.telegraf_service A Value:0xc00a85aa90} C:{Var:C Labels:name=CH1VLXDEV007.telegraf_service A Value:0xc00a85aa80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818838319s EvaluationString:[ var='B' labels={name=CH1VLXDEV007.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXDEV007.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD002.telegraf_service A Value:0xc00a85aab0} C:{Var:C Labels:name=CH1VLXPROD002.telegraf_service A Value:0xc00a85aad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818840792s EvaluationString:[ var='B' labels={name=CH1VLXPROD002.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD002.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD003.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD003.telegraf_service A Value:0xc00a85aaf0} C:{Var:C Labels:name=CH1VLXPROD003.telegraf_service A Value:0xc00a85ab00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818844349s EvaluationString:[ var='B' labels={name=CH1VLXPROD003.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD003.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD005.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD005.telegraf_service A Value:0xc00a85ab20} C:{Var:C Labels:name=CH1VLXPROD005.telegraf_service A 
Value:0xc00a85ab30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818846705s EvaluationString:[ var='B' labels={name=CH1VLXPROD005.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD005.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD007.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD007.telegraf_service A Value:0xc00a85ab60} C:{Var:C Labels:name=CH1VLXPROD007.telegraf_service A Value:0xc00a85ab50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818848954s EvaluationString:[ var='B' labels={name=CH1VLXPROD007.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXPROD007.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD008.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD008.telegraf_service A Value:0xc00a85ab80} C:{Var:C Labels:name=CH1VLXPROD008.telegraf_service A Value:0xc00a85ab90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818851084s EvaluationString:[ var='B' labels={name=CH1VLXPROD008.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXPROD008.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD009.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD009.telegraf_service A Value:0xc00a85abb0} C:{Var:C Labels:name=CH1VLXPROD009.telegraf_service A Value:0xc00a85abc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818853624s EvaluationString:[ var='B' labels={name=CH1VLXPROD009.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD009.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD011.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD011.telegraf_service A Value:0xc00a85abf0} C:{Var:C Labels:name=CH1VLXPROD011.telegraf_service A Value:0xc00a85abe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818855968s EvaluationString:[ var='B' labels={name=CH1VLXPROD011.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXPROD011.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD012.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD012.telegraf_service A Value:0xc00a85ac20} C:{Var:C Labels:name=CH1VLXPROD012.telegraf_service A Value:0xc00a85ac10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818858808s EvaluationString:[ var='B' labels={name=CH1VLXPROD012.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXPROD012.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD013.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD013.telegraf_service A Value:0xc00a85ac60} C:{Var:C Labels:name=CH1VLXPROD013.telegraf_service A Value:0xc00a85ac50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818861592s EvaluationString:[ var='B' labels={name=CH1VLXPROD013.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD013.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD017.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD017.telegraf_service A Value:0xc00a85ac80} C:{Var:C Labels:name=CH1VLXPROD017.telegraf_service A Value:0xc00a85ac90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818865099s EvaluationString:[ var='B' labels={name=CH1VLXPROD017.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD017.telegraf_service A} value=0 ]} 
{Instance:name=CH1VLXPROD019.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD019.telegraf_service A Value:0xc00a85acb0} C:{Var:C Labels:name=CH1VLXPROD019.telegraf_service A Value:0xc00a85acc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818870667s EvaluationString:[ var='B' labels={name=CH1VLXPROD019.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD019.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD021.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD021.telegraf_service A Value:0xc00a85ace0} C:{Var:C Labels:name=CH1VLXPROD021.telegraf_service A Value:0xc00a85acf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818872868s EvaluationString:[ var='B' labels={name=CH1VLXPROD021.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD021.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD023.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD023.telegraf_service A Value:0xc00a85ad20} C:{Var:C Labels:name=CH1VLXPROD023.telegraf_service A Value:0xc00a85ad30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818875255s EvaluationString:[ var='B' labels={name=CH1VLXPROD023.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXPROD023.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD026.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD026.telegraf_service A Value:0xc00a85ad50} C:{Var:C Labels:name=CH1VLXPROD026.telegraf_service A Value:0xc00a85ad60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818877291s EvaluationString:[ var='B' labels={name=CH1VLXPROD026.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXPROD026.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD031.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD031.telegraf_service A Value:0xc00a85ad80} C:{Var:C Labels:name=CH1VLXPROD031.telegraf_service A Value:0xc00a85ad90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818879436s EvaluationString:[ var='B' labels={name=CH1VLXPROD031.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD031.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD034.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD034.telegraf_service A Value:0xc00a85adb0} C:{Var:C Labels:name=CH1VLXPROD034.telegraf_service A Value:0xc00a85adc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818882099s EvaluationString:[ var='B' labels={name=CH1VLXPROD034.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD034.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD035.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD035.telegraf_service A Value:0xc00a85ade0} C:{Var:C Labels:name=CH1VLXPROD035.telegraf_service A Value:0xc00a85adf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818884352s EvaluationString:[ var='B' labels={name=CH1VLXPROD035.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD035.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD046.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD046.telegraf_service A Value:0xc00a85ae10} C:{Var:C Labels:name=CH1VLXPROD046.telegraf_service A Value:0xc00a85ae20}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818887096s EvaluationString:[ var='B' labels={name=CH1VLXPROD046.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD046.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD047.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD047.telegraf_service A Value:0xc00a85ae40} C:{Var:C Labels:name=CH1VLXPROD047.telegraf_service A Value:0xc00a85ae50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818889086s EvaluationString:[ var='B' labels={name=CH1VLXPROD047.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD047.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD048.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD048.telegraf_service A Value:0xc00a85ae80} C:{Var:C Labels:name=CH1VLXPROD048.telegraf_service A Value:0xc00a85ae70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818891201s EvaluationString:[ var='B' labels={name=CH1VLXPROD048.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD048.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD049.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD049.telegraf_service A Value:0xc00a85aea0} C:{Var:C Labels:name=CH1VLXPROD049.telegraf_service A Value:0xc00a85aec0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818893844s EvaluationString:[ var='B' labels={name=CH1VLXPROD049.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXPROD049.telegraf_service A} value=0 ]} {Instance:name=CH1VLXPROD050.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXPROD050.telegraf_service A Value:0xc00a85aee0} C:{Var:C Labels:name=CH1VLXPROD050.telegraf_service A Value:0xc00a85aef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818895857s EvaluationString:[ var='B' labels={name=CH1VLXPROD050.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXPROD050.telegraf_service A} value=0 ]} {Instance:name=CH1VLXSTG002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXSTG002.telegraf_service A Value:0xc00a85af10} C:{Var:C Labels:name=CH1VLXSTG002.telegraf_service A Value:0xc00a85af20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818899044s EvaluationString:[ var='B' labels={name=CH1VLXSTG002.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXSTG002.telegraf_service A} value=0 ]} {Instance:name=CH1VLXSTG006.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXSTG006.telegraf_service A Value:0xc00a85af40} C:{Var:C Labels:name=CH1VLXSTG006.telegraf_service A Value:0xc00a85af50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818902245s EvaluationString:[ var='B' labels={name=CH1VLXSTG006.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXSTG006.telegraf_service A} value=0 ]} {Instance:name=CH1VLXUAT002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXUAT002.telegraf_service A Value:0xc00a85af80} C:{Var:C Labels:name=CH1VLXUAT002.telegraf_service A Value:0xc00a85af90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818905103s EvaluationString:[ var='B' labels={name=CH1VLXUAT002.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXUAT002.telegraf_service A} value=0 ]} 
{Instance:name=CH1VLXUAT003.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXUAT003.telegraf_service A Value:0xc00a85afb0} C:{Var:C Labels:name=CH1VLXUAT003.telegraf_service A Value:0xc00a85afc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818907084s EvaluationString:[ var='B' labels={name=CH1VLXUAT003.telegraf_service A} value=360 ], [ var='C' labels={name=CH1VLXUAT003.telegraf_service A} value=0 ]} {Instance:name=CH1VLXUAT004.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CH1VLXUAT004.telegraf_service A Value:0xc00a85aff0} C:{Var:C Labels:name=CH1VLXUAT004.telegraf_service A Value:0xc00a85afe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.81890979s EvaluationString:[ var='B' labels={name=CH1VLXUAT004.telegraf_service A} value=359 ], [ var='C' labels={name=CH1VLXUAT004.telegraf_service A} value=0 ]} {Instance:name=CHGLXPROD001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=CHGLXPROD001.telegraf_service A Value:0xc00a85b010} C:{Var:C Labels:name=CHGLXPROD001.telegraf_service A Value:0xc00a85b020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818912217s EvaluationString:[ var='B' labels={name=CHGLXPROD001.telegraf_service A} value=360 ], [ var='C' labels={name=CHGLXPROD001.telegraf_service A} value=0 ]} {Instance:name=FR2LXPROD001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=FR2LXPROD001.telegraf_service A Value:0xc00a85b050} C:{Var:C Labels:name=FR2LXPROD001.telegraf_service A Value:0xc00a85b040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818915362s EvaluationString:[ var='B' labels={name=FR2LXPROD001.telegraf_service A} value=360 ], [ var='C' labels={name=FR2LXPROD001.telegraf_service A} value=0 ]} {Instance:name=LD5LXPROD001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=LD5LXPROD001.telegraf_service A Value:0xc00a85b070} C:{Var:C Labels:name=LD5LXPROD001.telegraf_service A Value:0xc00a85b080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818917284s EvaluationString:[ var='B' labels={name=LD5LXPROD001.telegraf_service A} value=359 ], [ var='C' labels={name=LD5LXPROD001.telegraf_service A} value=0 ]} {Instance:name=LO7VLXPROD001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=LO7VLXPROD001.telegraf_service A Value:0xc00a85b0a0} C:{Var:C Labels:name=LO7VLXPROD001.telegraf_service A Value:0xc00a85b0b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818919255s EvaluationString:[ var='B' labels={name=LO7VLXPROD001.telegraf_service A} value=359 ], [ var='C' labels={name=LO7VLXPROD001.telegraf_service A} value=0 ]} {Instance:name=LO7VLXPROD004.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=LO7VLXPROD004.telegraf_service A Value:0xc00a85b0d0} C:{Var:C Labels:name=LO7VLXPROD004.telegraf_service A Value:0xc00a85b0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818922198s EvaluationString:[ var='B' labels={name=LO7VLXPROD004.telegraf_service A} value=359 ], [ var='C' labels={name=LO7VLXPROD004.telegraf_service A} value=0 ]} {Instance:name=LO7VLXPROD005.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=LO7VLXPROD005.telegraf_service A Value:0xc00a85b100} C:{Var:C Labels:name=LO7VLXPROD005.telegraf_service A Value:0xc00a85b110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.818924937s EvaluationString:[ var='B' labels={name=LO7VLXPROD005.telegraf_service A} value=359 ], [ var='C' labels={name=LO7VLXPROD005.telegraf_service A} value=0 ]} {Instance:name=LO7VLXPROD107.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=LO7VLXPROD107.telegraf_service A Value:0xc00a85b130} C:{Var:C Labels:name=LO7VLXPROD107.telegraf_service A Value:0xc00a85b150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.81892706s EvaluationString:[ var='B' labels={name=LO7VLXPROD107.telegraf_service A} value=360 ], [ var='C' labels={name=LO7VLXPROD107.telegraf_service A} value=0 ]} {Instance:name=NY2LXPROD001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2LXPROD001.telegraf_service A Value:0xc00a85b170} C:{Var:C Labels:name=NY2LXPROD001.telegraf_service A Value:0xc00a85b180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818931006s EvaluationString:[ var='B' labels={name=NY2LXPROD001.telegraf_service A} value=360 ], [ var='C' labels={name=NY2LXPROD001.telegraf_service A} value=0 ]} {Instance:name=NY2VLXDEV001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXDEV001.telegraf_service A Value:0xc00a85b1a0} C:{Var:C Labels:name=NY2VLXDEV001.telegraf_service A Value:0xc00a85b1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818933152s EvaluationString:[ var='B' labels={name=NY2VLXDEV001.telegraf_service A} value=359 ], [ var='C' labels={name=NY2VLXDEV001.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD001.telegraf_service A Value:0xc00a85b1d0} C:{Var:C Labels:name=NY2VLXPROD001.telegraf_service A Value:0xc00a85b1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818936554s EvaluationString:[ var='B' labels={name=NY2VLXPROD001.telegraf_service A} value=359 ], [ var='C' labels={name=NY2VLXPROD001.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD002.telegraf_service A Value:0xc00a85b210} C:{Var:C Labels:name=NY2VLXPROD002.telegraf_service A Value:0xc00a85b200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818938925s EvaluationString:[ var='B' labels={name=NY2VLXPROD002.telegraf_service A} value=359 ], [ var='C' labels={name=NY2VLXPROD002.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD003.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD003.telegraf_service A Value:0xc00a85b230} C:{Var:C Labels:name=NY2VLXPROD003.telegraf_service A Value:0xc00a85b240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818940935s EvaluationString:[ var='B' labels={name=NY2VLXPROD003.telegraf_service A} value=359 ], [ var='C' labels={name=NY2VLXPROD003.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD007.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD007.telegraf_service A Value:0xc00a85b260} C:{Var:C Labels:name=NY2VLXPROD007.telegraf_service A Value:0xc00a85b270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818943353s EvaluationString:[ var='B' labels={name=NY2VLXPROD007.telegraf_service A} value=360 ], [ var='C' labels={name=NY2VLXPROD007.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD011.telegraf_service A State:Normal Error: Results:map[] 
Values:map[B:{Var:B Labels:name=NY2VLXPROD011.telegraf_service A Value:0xc00a85b290} C:{Var:C Labels:name=NY2VLXPROD011.telegraf_service A Value:0xc00a85b2a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818946033s EvaluationString:[ var='B' labels={name=NY2VLXPROD011.telegraf_service A} value=359 ], [ var='C' labels={name=NY2VLXPROD011.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD013.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD013.telegraf_service A Value:0xc00a85b2c0} C:{Var:C Labels:name=NY2VLXPROD013.telegraf_service A Value:0xc00a85b2d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818948169s EvaluationString:[ var='B' labels={name=NY2VLXPROD013.telegraf_service A} value=360 ], [ var='C' labels={name=NY2VLXPROD013.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD014.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD014.telegraf_service A Value:0xc00a85b2f0} C:{Var:C Labels:name=NY2VLXPROD014.telegraf_service A Value:0xc00a85b300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818950982s EvaluationString:[ var='B' labels={name=NY2VLXPROD014.telegraf_service A} value=360 ], [ var='C' labels={name=NY2VLXPROD014.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD015.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD015.telegraf_service A Value:0xc00a85b330} C:{Var:C Labels:name=NY2VLXPROD015.telegraf_service A Value:0xc00a85b320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818952877s EvaluationString:[ var='B' labels={name=NY2VLXPROD015.telegraf_service A} value=359 ], [ var='C' labels={name=NY2VLXPROD015.telegraf_service A} value=0 ]} {Instance:name=NY2VLXPROD016.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXPROD016.telegraf_service A Value:0xc00a85b350} C:{Var:C Labels:name=NY2VLXPROD016.telegraf_service A Value:0xc00a85b360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.81895582s EvaluationString:[ var='B' labels={name=NY2VLXPROD016.telegraf_service A} value=359 ], [ var='C' labels={name=NY2VLXPROD016.telegraf_service A} value=0 ]} {Instance:name=NY2VLXSTG001.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXSTG001.telegraf_service A Value:0xc00a85b380} C:{Var:C Labels:name=NY2VLXSTG001.telegraf_service A Value:0xc00a85b390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818958697s EvaluationString:[ var='B' labels={name=NY2VLXSTG001.telegraf_service A} value=360 ], [ var='C' labels={name=NY2VLXSTG001.telegraf_service A} value=0 ]} {Instance:name=NY2VLXUAT002.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=NY2VLXUAT002.telegraf_service A Value:0xc00a85b3b0} C:{Var:C Labels:name=NY2VLXUAT002.telegraf_service A Value:0xc00a85b3c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818961989s EvaluationString:[ var='B' labels={name=NY2VLXUAT002.telegraf_service A} value=360 ], [ var='C' labels={name=NY2VLXUAT002.telegraf_service A} value=0 ]} {Instance:name=netbrain_data_server.telegraf_service A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=netbrain_data_server.telegraf_service A Value:0xc00a85b3e0} C:{Var:C Labels:name=netbrain_data_server.telegraf_service A Value:0xc00a85b3f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.818965343s 
EvaluationString:[ var='B' labels={name=netbrain_data_server.telegraf_service A} value=360 ], [ var='C' labels={name=netbrain_data_server.telegraf_service A} value=0 ]}]" duration=69.311791ms + level=debug ts=2024-05-29T13:44:13.820786291Z caller=remote_instance_store.go:51 user=828268 slug=alsy2600 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=691055 slug=simonsdropshipqa instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.820818295Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=691055 slug=simonsdropshipqa t=2024-05-29T13:44:13.820789665Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=691055 slug=simonsdropshipqa version=1 fingerprint=c33d57d4ccda6ea2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.820722003Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.820541066s EvaluationString:}]" duration=7.881625ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mic4br5w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820731669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=828268 slug=alsy2600 version=17 fingerprint=12d125c7790a44ad attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.820445125Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc01c335ab0} C:{Var:C Labels: Value:0xc01c335ab8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.819991975s EvaluationString:[ var='B' labels={} value=9.65 ], [ var='C' labels={} value=0 ]}]" duration=312.645224ms + level=debug ts=2024-05-29T13:44:13.820452745Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mibtuini-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820461586Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.820351088Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mibtuini-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820433996Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mibtuini-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820424376Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miamqqxh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820368835Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-miamqqxh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820260644Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mi8qk5j4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820141363Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhzhayvk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.820009732Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.819989453Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.819913117Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.819930848Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.819909897Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhzhayvk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81983149Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhw0azon-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.819761989Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhw0azon-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.819643738Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhueb9y7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.819601117Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.819561253Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mho7g9or-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.819410395Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.819421543Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mho7g9or-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.819277514Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=148654 slug=tinybeans t=2024-05-29T13:44:13.81927405Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhm7tu3d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.819070202Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.819043469Z caller=client.go:80 msg="creating client for grafana instance" user=411704 addr=dns:///xoss-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhm7tu3d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818996861Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.818997031Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mhetja4l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818954111Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.818930888Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=507157 slug=tce + level=debug ts=2024-05-29T13:44:13.818708476Z caller=ruler.go:522 msg="tenant is owned by this instance" user=337009 slug=thepunter99 groups=0 + level=debug ts=2024-05-29T13:44:13.818711577Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.818690273Z caller=client.go:80 msg="creating client for grafana instance" user=354447 addr=dns:///xenopos-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.818627208Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.818546664Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.818531013Z caller=remote_instance_store.go:51 user=387869 slug=lantor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgpmi77a-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818391325Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.818323993Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=724029 slug=energetechbgtest + level=debug ts=2024-05-29T13:44:13.818293792Z caller=ruler.go:522 msg="tenant is owned by this instance" user=724029 slug=energetechbgtest groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgjao2r9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818293754Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.818242189Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgjao2r9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818254123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=873368 slug=euid instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:13.818189808Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=873368 slug=euid instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:13.818182765Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=873368 slug=euid version=38 fingerprint=19bd7c178ca82e15 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.818099282Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=QUERY State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.817785633s EvaluationString:}]" duration=7.632037ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mge6dut3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818205693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mge6dut3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818144812Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.818045706Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgci5qod-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818058761Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgci5qod-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818036041Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgci5qod-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.818002171Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgci5qod-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817978991Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.81787338Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgbnj2oy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81790431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgbnj2oy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81788007Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgbnj2oy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817843759Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgbnj2oy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817748768Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.817652185Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.817684247Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mgbnj2oy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817717318Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg8mp0pc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817501736Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg8lsz5h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817455975Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg8lsz5h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817426435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg8lsz5h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817354244Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.817366457Z caller=client.go:80 msg="creating client for grafana instance" user=557953 addr=dns:///wspr-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.817311124Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=331542 slug=tecevo + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-mg8lsz5h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817291094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg7rfit1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817167922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=DvdcQoO7k, ref_id=A" t=2024-05-29T13:44:13.817184459Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=DvdcQoO7k, ref_id=A" t=2024-05-29T13:44:13.817177592Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.817165422Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info component=discovery ts=2024-05-29T13:44:13.817162782Z caller=client.go:80 msg="creating client for grafana instance" user=731159 addr=dns:///festinafinance-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.816533696Z caller=client.go:80 msg="creating client for grafana instance" user=299140 addr=dns:///wowarriors-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg7rfit1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817075861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.816955389Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg7rfit1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.817012141Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.817103772Z caller=remote_instance_store.go:51 user=851297 slug=roadrunneruat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=851297 slug=roadrunneruat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.816945079Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + 
level=debug ts=2024-05-29T13:44:13.817060644Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.816985579Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg7rfit1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81697544Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.817021719Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.816907657Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=851297 slug=roadrunneruat version=1 fingerprint=c4aa47c81682d06f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.816822207Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.816603969s EvaluationString:}]" duration=8.981994ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg7afsn4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.816855469Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.816732169Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=70430 slug=dapperlabs t=2024-05-29T13:44:13.816648581Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=484427 slug=gqccollimator instance= t=2024-05-29T13:44:13.816619474Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg53w7pk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.816588116Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg26uem9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.816421375Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mg26uem9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.816279613Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.816031671Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=569985 slug=dsaz + Error parsing panelUID for alert annotationruleID1543dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=155740 slug=routific version=6 fingerprint=a9906ea554e4681f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.816208189Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.81591073s EvaluationString:}]" duration=14.217171ms + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.816099057Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=warn ts=2024-05-29T13:44:13.816186016Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=624530 slug=tgi + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfx9ayaf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.816162692Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfx9ayaf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.816123212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfx9ayaf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.816015441Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.81598171Z caller=ruler.go:522 msg="tenant is owned by this instance" user=624530 slug=tgi groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfvi2v6b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81594686Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfvi2v6b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.815913619Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.815799725Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.815641358Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info component=discovery ts=2024-05-29T13:44:13.815663739Z caller=client.go:80 msg="creating client for grafana instance" user=525161 addr=dns:///wolfpacknz-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mftyl4j8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.815646137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfpg1qe6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.815613996Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfpg1qe6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.815590136Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.813059826Z caller=ruler.go:522 msg="tenant is owned by this instance" user=417187 slug=punterstech groups=3 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfpg1qe6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.815559536Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.812741422Z caller=client.go:80 msg="creating client for grafana instance" user=338095 addr=dns:///venitynetwork-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug 
ts=2024-05-29T13:44:13.815433565Z caller=ruler.go:522 msg="tenant is owned by this instance" user=715255 slug=dvbxcplatform groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfpg1qe6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.815467355Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.815400465Z caller=client.go:80 msg="creating client for grafana instance" user=618652 addr=dns:///facilitypro-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mfnw02e0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.815373304Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.815346689Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=287085 slug=simongray + level=debug ts=2024-05-29T13:44:13.815234863Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=87780 slug=zencloudandhosting t=2024-05-29T13:44:13.815161122Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=51.273938ms + level=info ts=2024-05-29T13:44:13.815119975Z caller=grafana.go:247 user=309706 slug=felfel msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=1 alerts=0 + level=debug ts=2024-05-29T13:44:13.814916963Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.814636658Z caller=ruler.go:522 msg="tenant is owned by this instance" user=502287 slug=drikusvd groups=0 + level=debug ts=2024-05-29T13:44:13.81464277Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.812213428Z caller=client.go:80 msg="creating client for grafana instance" user=425307 addr=dns:///upendrapathuri-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=704123 slug=sportsmanwarehouse t=2024-05-29T13:44:13.814483937Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=112387 slug=lucidhq instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.814571144Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=704123 slug=sportsmanwarehouse instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.814468096Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.814422747Z caller=client.go:80 msg="creating client for 
grafana instance" user=541013 addr=dns:///whitecomputing-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=704123 slug=sportsmanwarehouse instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.814423766Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mf6fq376-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.814471185Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.811420596Z caller=ruler.go:522 msg="tenant is owned by this instance" user=284581 slug=sysolv groups=1 + logger=ngalert.state.manager user=704123 slug=sportsmanwarehouse t=2024-05-29T13:44:13.814363545Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mf48dpl6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.814130481Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.814208245Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mf48dpl6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.814121121Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=704123 slug=sportsmanwarehouse version=1 fingerprint=76b970c1b0ac9f76 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.814236803Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.813950342s EvaluationString:}]" duration=7.218043ms + logger=ngalert.state.manager user=695888 slug=boeingdr instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.81402845Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=735588 slug=srepradnya version=5 fingerprint=db76f13451418d84 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.813948838Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.813551148s EvaluationString:}]" duration=7.382206ms + level=debug ts=2024-05-29T13:44:13.811165913Z caller=ruler.go:522 msg="tenant is owned by this instance" user=535905 slug=soficshifts 
groups=2 + level=info component=discovery ts=2024-05-29T13:44:13.813941548Z caller=client.go:80 msg="creating client for grafana instance" user=401916 addr=dns:///wchchua-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.813884683Z caller=client.go:80 msg="creating client for grafana instance" user=323441 addr=dns:///wangwill-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mek8ognc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813959579Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mek8ognc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813937589Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.813908281Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mejbzjk7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813849688Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mejbzjk7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813794178Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.813872902Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.812186293Z caller=ruler.go:522 msg="tenant is owned by this instance" user=324288 slug=tafensw groups=0 + level=debug ts=2024-05-29T13:44:13.813762543Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-med6dzeh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.813723227Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-med6dzeh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813699167Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.813642351Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=643598 slug=teamlma + level=debug ts=2024-05-29T13:44:13.810534111Z caller=ruler.go:522 msg="tenant is owned by this instance" user=643598 slug=teamlma groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-med6dzeh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813644986Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.813602608Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-med6dzeh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813615596Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-me0j7hw5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813478715Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.813388646Z caller=ruler.go:522 msg="tenant is owned by this instance" user=551616 slug=eltuko groups=0 + level=info component=discovery ts=2024-05-29T13:44:13.812861241Z caller=client.go:80 msg="creating client for grafana instance" user=563718 addr=dns:///exertus-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdygdar7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813376933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdygdar7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813353053Z level=debug msg="Setting next state" handler=resultNormal
+level=warn ts=2024-05-29T13:44:13.813261845Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=557005 slug=egsi
+level=debug ts=2024-05-29T13:44:13.813222135Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdygdar7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813251472Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdygdar7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.813241652Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=277807 slug=info96f8 t=2024-05-29T13:44:13.81304362Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.262585ms
+level=debug ts=2024-05-29T13:44:13.813091187Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mduz9lkd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.812944309Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.812883941Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.812863418Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.812808537Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.812806439Z caller=client.go:80 msg="creating client for grafana instance" user=465820 addr=dns:///vianettest-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=info component=discovery ts=2024-05-29T13:44:13.812806341Z caller=client.go:80 msg="creating client for grafana instance" user=754297 addr=dns:///exelcia-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.812698906Z caller=ruler.go:522 msg="tenant is owned by this instance" user=301742 slug=redbelly groups=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdrticn9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.812763657Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.812669082Z caller=ruler.go:522 msg="tenant is owned by this instance" user=654542 slug=royalelimousines groups=0
+level=debug ts=2024-05-29T13:44:13.812551138Z caller=ruler.go:522 msg="tenant is owned by this instance" user=874973 slug=elcorteinglesdev groups=0
+level=debug ts=2024-05-29T13:44:13.812259735Z caller=ruler.go:522 msg="tenant is owned by this instance" user=726919 slug=eb1jwlbqlc groups=0
+level=info component=discovery ts=2024-05-29T13:44:13.812120934Z caller=client.go:80 msg="creating client for grafana instance" user=554880 addr=dns:///evebury-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.812024633Z caller=ruler.go:522 msg="tenant is owned by this instance" user=654500 slug=eclipticsv groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdd711zt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.811971309Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdd711zt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.811930259Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.811989316Z caller=remote_instance_store.go:51 user=843304 slug=ppcgroup msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.810848512Z caller=ruler.go:522 msg="tenant is owned by this instance" user=402858 slug=pushwoosh groups=0
+level=debug ts=2024-05-29T13:44:13.811858842Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mdbxyzr2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.811669686Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mda8hmqu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.811636686Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mda8hmqu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.811578695Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.811703128Z caller=client.go:80 msg="creating client for grafana instance" user=318074 addr=dns:///undef-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.811576955Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.811492528Z caller=ruler.go:522 msg="tenant is owned by this instance" user=708551 slug=digitaltechnology groups=2
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-md1niblt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.811429494Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.811441531Z caller=ruler.go:522 msg="tenant is owned by this instance" user=407212 slug=scottkuhlmann1 groups=0
+logger=ngalert.state.manager.persist user=421567 slug=nexx360 t=2024-05-29T13:44:13.811255628Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.99468ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcyw5770-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.811262482Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.811266047Z caller=ruler.go:522 msg="tenant is owned by this instance" user=499590 slug=procarenz groups=2
+level=debug ts=2024-05-29T13:44:13.811101762Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcv39m11-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81113019Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.811043724Z caller=client.go:80 msg="creating client for grafana instance" user=569904 addr=dns:///ercluster-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcv39m11-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81104979Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.810750486Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.81074625Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcrfz5tl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810995499Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.811002Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.810960267Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=warn ts=2024-05-29T13:44:13.81091487Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=301946 slug=rrrr
+logger=ngalert.state.manager user=376364 slug=pn0625prod01 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.81088171Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.810888958Z caller=ruler.go:522 msg="tenant is owned by this instance" user=301946 slug=rrrr groups=0
+logger=ngalert.state.manager user=376364 slug=pn0625prod01 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.81087101Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager.persist user=119385 slug=elastio t=2024-05-29T13:44:13.810834538Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=119385 slug=elastio instance="datasource_uid=TOpNjQW4k, ref_id=A" t=2024-05-29T13:44:13.81081808Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=376364 slug=pn0625prod01 version=11 fingerprint=73a359fa74e7a3f8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.81075471Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.807728976s EvaluationString:}]" duration=24.893149ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcp9smow-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810846478Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcp9smow-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810777147Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.810755041Z caller=remote_instance_store.go:51 user=290313 slug=replit msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcp9smow-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810729256Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.810686482Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.810669121Z caller=remote_instance_store.go:51 user=438855 slug=teckresources msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.81063772Z caller=client.go:80 msg="creating client for grafana instance" user=707638 addr=dns:///eparagony-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=info component=discovery ts=2024-05-29T13:44:13.810600641Z caller=client.go:80 msg="creating client for grafana instance" user=689358 addr=dns:///tisc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=warn ts=2024-05-29T13:44:13.81059942Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=696197 slug=ealiakbar
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcmm4in2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810633605Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcmm4in2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810603475Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=438855 slug=teckresources version=5 fingerprint=b6f66d64440929de attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.810441835Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.810132128s EvaluationString:}]" duration=88.576388ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcmm4in2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810493154Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mclyspxh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810416463Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mclyspxh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810383113Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.810336972Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=355429 slug=zenpli t=2024-05-29T13:44:13.810268273Z level=debug msg="Saving alert states done" count=11 max_state_save_concurrency=1 duration=223.250969ms
+logger=ngalert.state.manager.persist user=615392 slug=shinemetrics t=2024-05-29T13:44:13.810148483Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=22.760631ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mclyspxh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810263312Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.810112275Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcjrjwoh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810208021Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcjrjwoh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810184901Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.810140487Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcjrjwoh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.81013324Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcixsyvc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.810024129Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcixsyvc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809993429Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.810005647Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.809930981Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.809866215Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.809820921Z caller=client.go:80 msg="creating client for grafana instance" user=360502 addr=dns:///timwilson1-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mchlmupa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809791817Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.809672111Z caller=client.go:80 msg="creating client for grafana instance" user=335740 addr=dns:///enilab-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.80968325Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcgyv9vg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809689956Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcgyv9vg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809566594Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.809572277Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.809564952Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcemhac3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809343672Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mcemhac3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809329482Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.809272038Z caller=remote_instance_store.go:51 user=451750 slug=amadeuspfpprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mca06c2p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809203481Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mca06c2p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.809083269Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="__name__=node_scrape_collector_success, agent_hostname=proxy03, collector=tapestats, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:13.809028985Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="__name__=node_scrape_collector_success, agent_hostname=proxy03, collector=tapestats, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:13.809013897Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mc85cfy8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.808832967Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="__name__=node_scrape_collector_success, agent_hostname=proxy03, collector=rapl, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:13.808847456Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.808758837Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mc7yxfvx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.808763016Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="__name__=node_scrape_collector_success, agent_hostname=proxy03, collector=nfsd, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:13.808704956Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mc038agw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.808555084Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="__name__=node_scrape_collector_success, agent_hostname=proxy03, collector=nfs, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:13.808520827Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mc038agw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.808446443Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mc038agw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.808382202Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="__name__=node_scrape_collector_success, agent_hostname=proxy03, collector=ipvs, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:13.80835521Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.808164197Z caller=ruler.go:522 msg="tenant is owned by this instance" user=625346 slug=cicou02 groups=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbybn0rz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.80819594Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.808124049Z caller=remote_instance_store.go:51 user=151289 slug=everflow msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.808072791Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=warn ts=2024-05-29T13:44:13.808044596Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=501268 slug=dirkbromberg
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbtf59xj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.808090179Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod instance="__name__=node_scrape_collector_success, agent_hostname=proxy03, collector=bonding, instance=proxy03, job=integrations/node_exporter" t=2024-05-29T13:44:13.807971314Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=Ta6tIPbnz, ref_id=A" t=2024-05-29T13:44:13.807822903Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=Ta6tIPbnz, ref_id=A" t=2024-05-29T13:44:13.80781141Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbsrr7rb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.807904667Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=295631 slug=dapvizor version=77 fingerprint=756370f4dc4b847e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.807720189Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=Ta6tIPbnz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.807318532s EvaluationString:}]" duration=34.632066ms
+logger=ngalert.state.manager user=451750 slug=amadeuspfpprod t=2024-05-29T13:44:13.807755098Z level=debug msg="State manager processing evaluation results" resultCount=8
+level=debug ts=2024-05-29T13:44:13.807644382Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.807552408Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.807517386Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbjkijlx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.807471193Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.807331405Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.807268788Z caller=client.go:80 msg="creating client for grafana instance" user=687903 addr=dns:///energetech-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbjkijlx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.807328571Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbjkijlx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.807305821Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.807276879Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbhlr8xm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.807278551Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbhlr8xm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.807238571Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.80725669Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbcsaj1r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.807089509Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.806827884Z caller=client.go:80 msg="creating client for grafana instance" user=731426 addr=dns:///emeacluster-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mbcsaj1r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806918327Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.80668947Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mb2lclb3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806709305Z level=debug msg="Keeping state" state=Normal
+level=warn ts=2024-05-29T13:44:13.806766483Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=553021 slug=dccom
+level=debug ts=2024-05-29T13:44:13.806713416Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.806692583Z caller=ruler.go:522 msg="tenant is owned by this instance" user=553021 slug=dccom groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mb2lclb3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806646484Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mb2lclb3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806618284Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mb2lclb3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806545633Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mb1wa9b5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806407542Z level=debug msg="Setting next state" handler=resultNormal
+level=warn ts=2024-05-29T13:44:13.806249679Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=347647 slug=digiparc
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mb0tuqu9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806268051Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.805937376Z caller=client.go:80 msg="creating client for grafana instance" user=561133 addr=dns:///electromukke-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mb0tuqu9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806134779Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.806077366Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.806077156Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-marfzlxx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.806097039Z level=debug msg="Keeping state" state=Normal
+level=warn ts=2024-05-29T13:44:13.805946076Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=748169 slug=dkopitsa
+level=debug ts=2024-05-29T13:44:13.805926476Z caller=ruler.go:522 msg="tenant is owned by this instance" user=748169 slug=dkopitsa groups=0
+level=warn ts=2024-05-29T13:44:13.805913175Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=612807 slug=deeplabac
+level=debug ts=2024-05-29T13:44:13.805880075Z caller=ruler.go:522 msg="tenant is owned by this instance" user=612807 slug=deeplabac groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-marfzlxx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.805945377Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.80586838Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.805782817Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-malykc6h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.805688225Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.805487771Z caller=client.go:80 msg="creating client for grafana instance" user=612616 addr=dns:///electrojoule-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.80555212Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.805446771Z caller=client.go:80 msg="creating client for grafana instance" user=874973 addr=dns:///elcorteinglesdev-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=warn ts=2024-05-29T13:44:13.805424371Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=532128 slug=dineshtak83
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-malykc6h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.805503893Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mafoagan-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.805440012Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mafoagan-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.80527844Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-mafoagan-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.80519113Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ma9y8ug2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.805146789Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ma9y8ug2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.805095999Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.805064166Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.80506054Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=86.975165ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ma9y8ug2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.805023968Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.804937536Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:13.804895099Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:13.804788743Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.804516362Z caller=client.go:80 msg="creating client for grafana instance" user=557005 addr=dns:///egsi-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=warn ts=2024-05-29T13:44:13.803868356Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=667431 slug=dcce
+level=debug ts=2024-05-29T13:44:13.80456119Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ma5swuru-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.804502042Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ma5swuru-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.804433562Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.804417176Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ma5swuru-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.804397661Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=441901 slug=openmarkets instance="cloud/rolename=identityserveradmin-web" t=2024-05-29T13:44:13.804333209Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.804355561Z caller=client.go:80 msg="creating client for grafana instance" user=549493 addr=dns:///egilolberg-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.804304612Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.804205326Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=71.498207ms
+level=debug ts=2024-05-29T13:44:13.803900856Z caller=ruler.go:522 msg="tenant is owned by this instance" user=765721 slug=cshy groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ma2lb0q4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.804144459Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=441901 slug=openmarkets instance="cloud/rolename=accounts-opening-api" t=2024-05-29T13:44:13.803928817Z level=debug msg="Setting next state" handler=resultAlerting
+level=warn ts=2024-05-29T13:44:13.803889656Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=725901 slug=descry
+level=debug ts=2024-05-29T13:44:13.803868156Z caller=ruler.go:522 msg="tenant is owned by this instance" user=725901 slug=descry groups=0
+level=debug ts=2024-05-29T13:44:13.803739155Z caller=ruler.go:522 msg="tenant is owned by this instance" user=492072 slug=decareto groups=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9xd4f77-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.803722845Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9n9h34c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.803587933Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9n9h34c-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.803551863Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=277807 slug=info96f8 t=2024-05-29T13:44:13.802777692Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:13.802929701Z caller=remote_instance_store.go:51 user=277807 slug=info96f8 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9gvx3x5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.803354151Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.803369417Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9gvx3x5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.80327793Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.803007848Z caller=client.go:80 msg="creating client for grafana instance" user=639048 addr=dns:///ecoviumcloud-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9ert66o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.803096548Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.802965348Z caller=ruler.go:522 msg="tenant is owned by this instance" user=556188 slug=dchome groups=1
+level=debug ts=2024-05-29T13:44:13.803060711Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9ert66o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.803016877Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.802771846Z caller=client.go:80 msg="creating client for grafana instance" user=726919 addr=dns:///eb1jwlbqlc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9d8zlop-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802790425Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.802765683Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9d8zlop-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802723284Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m9d8zlop-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802691714Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=277807 slug=info96f8 version=1 fingerprint=3ebd79149ad20a6e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.802487131Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=magento_rabbitmq_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=magento_rabbitmq_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento Value:0xc04f38ee90} B:{Var:B Labels:__name__=magento_rabbitmq_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento Value:0xc04f38ef08} C:{Var:C Labels:__name__=magento_rabbitmq_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento Value:0xc04f38ef70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.801934974s EvaluationString:[ var='A' labels={__name__=magento_rabbitmq_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento} value=1 ], [ var='B' labels={__name__=magento_rabbitmq_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento} value=1 ], [ var='C' labels={__name__=magento_rabbitmq_health_check, environment=swiss-sense, instance=www.swisssense.nl:443, job=magento, monitor=magento} value=0 ]}]" duration=11.81312ms
+level=warn ts=2024-05-29T13:44:13.802615344Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=749955 slug=denton85
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m94jdvo7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802605443Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m94jdvo7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802569133Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m94ap5wd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802409121Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.802243079Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m94ap5wd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.8022662Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m91c6ndy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802226789Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m91c6ndy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.802194359Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.802002038Z caller=client.go:80 msg="creating client for grafana instance" user=556089 addr=dns:///easyconnectprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=warn ts=2024-05-29T13:44:13.801978438Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=546950 slug=dfafrica
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8zllnm9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801914896Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8u88mym-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801791145Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8u88mym-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801660973Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:13.801610061Z caller=remote_image_capturer.go:61 user=107179 slug=ibaudata rule_org_id=1 rule_uid=e4f3e4ba-5f2e-426c-b500-b71e022f47a2 dashboard=Y4q26LEGk panel=15 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8u88mym-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801633003Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8obh6yc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801597233Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.801456533Z caller=client.go:80 msg="creating client for grafana instance" user=520076 addr=dns:///eabdulaziz1996-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8obh6yc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801565522Z level=debug msg="Setting next state" handler=resultNormal
+level=warn ts=2024-05-29T13:44:13.801434333Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=657841 slug=ctdemo
+level=debug ts=2024-05-29T13:44:13.801390454Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8obh6yc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801467931Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8o6q9xk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.80138205Z level=debug msg="Setting next state"
handler=resultNormal + logger=ngalert.state.manager user=630397 slug=tatin instance= t=2024-05-29T13:44:13.801432179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=471517 slug=trist85 t=2024-05-29T13:44:13.801285515Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=73.13974ms + level=debug ts=2024-05-29T13:44:13.801258731Z caller=ruler.go:522 msg="tenant is owned by this instance" user=672672 slug=danielgibson728 groups=0 + level=debug ts=2024-05-29T13:44:13.801209892Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8npi0eh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801144488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.801162329Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.299923ms + level=debug ts=2024-05-29T13:44:13.801120751Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.800826127Z caller=client.go:80 msg="creating client for grafana instance" user=502287 addr=dns:///drikusvd-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.801051072Z caller=remote_image_capturer.go:54 user=107179 slug=ibaudata rule_org_id=1 rule_uid=e4f3e4ba-5f2e-426c-b500-b71e022f47a2 dashboard=Y4q26LEGk panel=15 msg="rendering alert image with grafana" + level=info component=discovery ts=2024-05-29T13:44:13.800949429Z caller=client.go:80 msg="creating client for grafana instance" user=715255 addr=dns:///dvbxcplatform-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8npi0eh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.801034477Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.800878428Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=548962 slug=crustified + level=info component=discovery ts=2024-05-29T13:44:13.800865728Z caller=client.go:80 msg="creating client for grafana instance" user=569985 addr=dns:///dsaz-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.800825727Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=574127 slug=cynetwork + logger=ngalert.state.manager user=107179 slug=ibaudata instance= t=2024-05-29T13:44:13.800962488Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.800803427Z caller=ruler.go:522 msg="tenant 
is owned by this instance" user=574127 slug=cynetwork groups=0 + level=info component=discovery ts=2024-05-29T13:44:13.800792827Z caller=client.go:80 msg="creating client for grafana instance" user=572672 addr=dns:///drath-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.800780727Z caller=ruler.go:522 msg="tenant is owned by this instance" user=605118 slug=deintegratiespecialist groups=0 + level=warn ts=2024-05-29T13:44:13.800764627Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=606867 slug=datacenters + level=debug ts=2024-05-29T13:44:13.800735126Z caller=ruler.go:522 msg="tenant is owned by this instance" user=606867 slug=datacenters groups=0 + level=debug ts=2024-05-29T13:44:13.800665907Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8cd8usl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.800797274Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8cd8usl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.800725054Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8cd8usl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.800698153Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m8cd8usl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.800654633Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.800296422Z caller=client.go:80 msg="creating client for grafana instance" user=678966 addr=dns:///dpuzik58-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.800490479Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-m8b6tssm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.800485261Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.800496364Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.800333642Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m89l23k8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.800303299Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m89l23k8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.800272689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.800252252Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.453017ms + level=info ts=2024-05-29T13:44:13.800172012Z caller=remote_alert_sender.go:94 user=656459 slug=activeport host=activeport-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.20.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ab9c2232-5fe9-4a9c-9015-bf22416d4b89 alerts=1 + level=debug ts=2024-05-29T13:44:13.799939919Z caller=ruler.go:522 msg="tenant is owned by this instance" user=755822 slug=danigs69 groups=0 + level=debug ts=2024-05-29T13:44:13.7997758Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7zj1ebx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799842364Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.799781108Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7zj1ebx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799803684Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7szn7pb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799740743Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=526835 slug=fundbot t=2024-05-29T13:44:13.799662269Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.855712ms + level=debug ts=2024-05-29T13:44:13.799590682Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=109452 slug=deltarisk instance= t=2024-05-29T13:44:13.799610316Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7r3fjmc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799507801Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.799511182Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7ox5n8e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799308109Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.7992216Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7ox5n8e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799154927Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7ox5n8e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799121397Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.799105874Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7mbkenu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.799049146Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.79901481Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=519184 slug=dawsonf + level=debug ts=2024-05-29T13:44:13.799025543Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.798796608Z caller=ruler.go:522 msg="tenant is owned by this instance" user=530280 slug=dennishei groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7kddfxh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.798548431Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7kddfxh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.798508991Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7kddfxh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.79847696Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.798366204Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=681349 slug=coyotesoftware + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m7cx3ytq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.798336049Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m789z0u4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.798284678Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m789z0u4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.798149047Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m77x7elx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.798112047Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.798019622Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m77x7elx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.798038606Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.7979295Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.797862699Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6uw5cuo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797872944Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=7e311f0d592a867c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.797806821Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.797506091s EvaluationString:}]" duration=329.892992ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6uw5cuo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797862414Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6qkzc3e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797711062Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6kneogl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797641562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6kneogl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797586521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6jch8hg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.79743566Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.797377295Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=530119 slug=daanscheper + level=debug ts=2024-05-29T13:44:13.797403492Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.797296467Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6jch8hg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797369419Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=421567 slug=nexx360 instance= t=2024-05-29T13:44:13.797239174Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.796695588Z caller=ruler.go:522 msg="tenant is owned by this instance" user=602996 slug=cryptoe groups=0 + level=info component=discovery ts=2024-05-29T13:44:13.797187093Z caller=client.go:80 msg="creating client for grafana instance" user=532128 addr=dns:///dineshtak83-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.796801089Z caller=ruler.go:522 msg="tenant is owned by this instance" user=609852 slug=cogepart groups=1 + logger=ngalert.state.manager user=421567 slug=nexx360 
t=2024-05-29T13:44:13.797174415Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6hpl34t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797174847Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6dupp8t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797136217Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.797169014Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6dupp8t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.797037905Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.797041592Z caller=client.go:80 msg="creating client for grafana instance" user=708551 addr=dns:///digitaltechnology-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6do8h7q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796988295Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.796974291Z caller=ruler.go:522 msg="tenant is owned by this instance" user=550120 slug=craftsuchtddnsnet groups=0 + level=debug ts=2024-05-29T13:44:13.796881742Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.796321085Z caller=ruler.go:522 msg="tenant is owned by this instance" user=603280 slug=christophlangwieser groups=0 + level=debug ts=2024-05-29T13:44:13.796909309Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.796779122Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.796800468Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.082921ms + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6an0i9d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796702212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6an0i9d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796676812Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.796755669Z caller=remote_instance_store.go:51 user=139073 slug=cargo1 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=139073 slug=cargo1 t=2024-05-29T13:44:13.796615492Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6an0i9d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796565911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=662363 slug=facephi version=3 fingerprint=bd0b3a282cbe1003 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.796369943Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=saa-eks-dpad-sae-1, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.796017266s EvaluationString:}]" duration=207.891196ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m6aii5sn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796349468Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m5xnr076-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796304698Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m5xnr076-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796238537Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m5xnr076-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796194487Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m5vfy2lw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796110806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m5vfy2lw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796076916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m5vfy2lw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.796060745Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.wes.sea, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.796177055Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.wes.sea, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.796051711Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.79586877Z caller=remote_instance_store.go:51 user=526847 slug=soniclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.wes.sea, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.795828573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.wes.sea, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.795749434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc 
instance="datacenter=wes.sea, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.wes.sea, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.795669183Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m4tbwh8m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.795583661Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.wes.sea, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.795563945Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m4tbwh8m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.795469709Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.795305042Z caller=remote_instance_store.go:51 user=149323 slug=theatreonline msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.795195488Z caller=remote_image_capturer.go:61 user=149323 slug=theatreonline rule_org_id=1 rule_uid=8HNC0vXnk dashboard=CzuJb6tMk panel=29 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.795140774Z caller=ruler.go:522 msg="tenant is owned by this instance" user=902002 slug=cmgas2prdneu groups=0 + logger=ngalert.state.manager user=149323 slug=theatreonline instance= t=2024-05-29T13:44:13.795229543Z level=warn msg="Failed to take an image" dashboard=CzuJb6tMk panel=29 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ty8.tky, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ty8.tky, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.795086981Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m4drfqsi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.794999995Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ty8.tky, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ty8.tky, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.794783293Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.794842447Z level=debug msg="Saving alert states" count=1 
max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.794773408Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m4bakkpd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.794658871Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m4bakkpd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7945937Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.794540976Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=b9ffa1b60e642f4d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.794529761Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=9F7TNpxVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.794218722s EvaluationString:}]" duration=91.682607ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ty8.tky, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ty8.tky, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.794576073Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.794363467Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=149323 slug=theatreonline t=2024-05-29T13:44:13.794323779Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.794336617Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.794326531Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.794222665Z caller=ruler.go:522 msg="tenant is owned by this instance" user=742098 slug=cminformatiktest groups=0 + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ty8.tky, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ty8.tky, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.794312716Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m49j8vhb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.794249177Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m49j8vhb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.794167556Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.794122747Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:13.79413228Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=61.647968ms + logger=ngalert.state.manager user=472647 slug=planet instance="metadata.label.system.name=g4c-sub-04-task-a5e5d3d2, metric.name=value_utilization_mean_mean" t=2024-05-29T13:44:13.794138836Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=472647 slug=planet instance="metadata.label.system.name=g4c-sub-04-workload-7f3b1c16, metric.name=value_utilization_mean_mean" t=2024-05-29T13:44:13.79410927Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m47k6epe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.794130166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance="metadata.label.system.name=g4c-sub-04-workload-7f3b1c16, metric.name=value_utilization_mean_mean" t=2024-05-29T13:44:13.79409653Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m47k6epe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.794041425Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:13.79404277Z level=debug msg="State manager processing evaluation results" resultCount=2 + level=debug ts=2024-05-29T13:44:13.793917762Z caller=ruler.go:522 msg="tenant is owned by this instance" user=557927 slug=axpodev groups=8 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m43uhj6e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793890053Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.793845741Z caller=remote_instance_store.go:51 user=407477 slug=inventa 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.793891097Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.793561642Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m43f398s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793696451Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.793646159Z caller=ruler.go:522 msg="tenant is owned by this instance" user=548300 slug=climaxhost groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m43f398s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793671461Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.793672297Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.79365803Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.793671768Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.793643854Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m41kip14-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.79357679Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.793585058Z caller=remote_alert_sender.go:94 user=127813 slug=clearsale host=clearsale-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.46:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=eddallt3ir1ttc alerts=1 + level=debug ts=2024-05-29T13:44:13.793593238Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:13.79356362Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=61.085144ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-vntd-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.79356378Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.793392661Z caller=remote_instance_store.go:51 user=884866 slug=cnonumerique msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m41kip14-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793503509Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.793519487Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.793494307Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.793484017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m3ubmptd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793468059Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m3ubmptd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793423898Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.793470452Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m3ubmptd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793409008Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.793442024Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc 
instance="datacenter=tr2.tor, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.79339642Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.793326045Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.79331184Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.79326467Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.793213018Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m3sjg02q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.793162846Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.793158675Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.793059454Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=C" t=2024-05-29T13:44:13.7927354Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=C" t=2024-05-29T13:44:13.792717297Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.790196227Z caller=ruler.go:522 msg="tenant is owned by this instance" user=505433 slug=brairlab groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m3edgvg5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792811472Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.792783099Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.tr2.tor, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.792768992Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m3edgvg5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792717121Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sy5.syd, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.792700272Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sy5.syd, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.79268472Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m3aet4sz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7925921Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sy5.syd, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.792596302Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.792509249Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=649479 slug=cfrmichalski + level=warn ts=2024-05-29T13:44:13.792332747Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=540485 slug=christophbronold + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m36inm2l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792354657Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.792371759Z caller=remote_instance_store.go:51 user=697570 slug=carroteco msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.792327264Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m36inm2l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792321077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-m36inm2l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792295677Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sy5.syd, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.792331774Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.790806533Z caller=ruler.go:522 msg="tenant is owned by this instance" user=540485 slug=christophbronold groups=0 + level=warn ts=2024-05-29T13:44:13.792157845Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=909264 slug=bawagdev1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m35vkpgq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792250196Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m35vkpgq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792217266Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.79216421Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.792082257Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sy5.syd, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.792121604Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.792032622Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m32on8jc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792047854Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.792050907Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-m32on8jc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.792014014Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.792034615Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m32on8jc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.791968404Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sy5.syd, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.791979845Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.791803142Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=650163 slug=cloudfuel + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m32g5y41-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.791879343Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.790738132Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.791715431Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.791724196Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.791729118Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sv10.sjc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.79169121Z level=debug msg="Keeping state" state=Normal + ts=2024-05-29T13:44:13.791590769Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: grafana-ruler-55c859bbcd-4r8rp-965058d2 10.15.183.110:7946" + level=info component=discovery ts=2024-05-29T13:44:13.791494339Z caller=client.go:80 msg="creating client for grafana instance" user=709157 addr=dns:///db71-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.791493839Z caller=client.go:80 msg="creating client for grafana instance" user=628097 addr=dns:///dc01-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m30cidsu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.79158167Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.791446339Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=641855 slug=clintoncao + level=debug ts=2024-05-29T13:44:13.791424538Z caller=ruler.go:522 msg="tenant is owned by this instance" user=605621 slug=cashflash groups=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2zgcpl4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.791400048Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.sv10.sjc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.791391093Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2wsuhqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.791302637Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2wsuhqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.791277356Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.791214336Z caller=ruler.go:522 msg="tenant is owned by this instance" user=641855 slug=clintoncao groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2wsuhqr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.791242206Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2udk983-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.790961213Z level=debug 
msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.791118236Z caller=ruler.go:522 msg="tenant is owned by this instance" user=638946 slug=climeworks groups=5 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2tejaxd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.790882382Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2tejaxd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.790828862Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2tejaxd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.790709201Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2t1mcyx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7906159Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2pos3mx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.790527119Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.791049953Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.790920781Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.79093578Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sv10.sjc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.791044245Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg t=2024-05-29T13:44:13.790929446Z level=debug msg="State manager 
processing evaluation results" resultCount=1 + level=warn ts=2024-05-29T13:44:13.790920534Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=549526 slug=codesyl + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2jehk0r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.790078784Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.790814298Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2iifwxt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.789773471Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m2iifwxt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.78971041Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m28x4vbs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.78966016Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.789826212Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.790215306Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.sv10.sjc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.790792891Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.79073008Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:13.790711797Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=57.262318ms + level=info ts=2024-05-29T13:44:13.790686469Z caller=remote_alert_sender.go:94 user=328971 slug=wefight host=wefight-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.102.248:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=R-wokZ74k alerts=1 + 
level=debug ts=2024-05-29T13:44:13.790607952Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.790566366Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.790489925Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.790481748Z caller=remote_instance_store.go:51 user=407315 slug=ppcp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=335419 slug=tbauctions t=2024-05-29T13:44:13.790470791Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.900986ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sv10.sjc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.790555136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.790478591Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.250479ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.790347892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=856040 slug=kuady instance= t=2024-05-29T13:44:13.790386999Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.790225794Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.790083473Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.789995345Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=512398 slug=brightdigital t=2024-05-29T13:44:13.789844424Z level=debug msg="Saving alert states done" count=11 max_state_save_concurrency=1 duration=356.258868ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.78982013Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:13.789724158Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=108112 slug=btctrader version=23 fingerprint=8059a6fe75149254 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.789695609Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.789436387s EvaluationString:}]" duration=131.286441ms + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m28x4vbs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.789554219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m24xpf19-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.789477028Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m24xpf19-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.789360957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.789319827Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.789229366Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.789174587Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m213xdsf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.789191855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.789064735Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.789042182Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.789034121Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, 
origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.789134367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.789123266Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1yar63y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.789077984Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=530405 slug=zetetic instance="chain=Kusama, pool=Green 1" t=2024-05-29T13:44:13.789027102Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.789055365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=163513 slug=dialpad version=11 fingerprint=5cfa54992e4d3809 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.788910616Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.788480258s EvaluationString:}]" duration=58.650714ms + logger=ngalert.scheduler user=530405 slug=zetetic version=82 fingerprint=cb3ada9a5bab82cf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.788910745Z level=debug msg="Alert rule evaluated" results="[{Instance:chain=Kusama, pool=Green 1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:chain=Kusama, pool=Green 1 Value:0xc061511738} C:{Var:C Labels:chain=Kusama, pool=Green 1 Value:0xc061511758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.788577138s EvaluationString:[ var='B' labels={chain=Kusama, pool=Green 1} value=1 ], [ var='C' labels={chain=Kusama, pool=Green 1} value=0 ]}]" duration=5.884114ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1xmvq6k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.788845982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.788967094Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.788794171Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.788765901Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1wqw8qz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.788628299Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1wqw8qz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.788598859Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1wqw8qz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.788456078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1jne33x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.788390927Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.788667136Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1jne33x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.788349456Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.788606533Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1fwl3nd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.788082664Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1dudrsf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.787938982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1dudrsf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.787825331Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m1diza0f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.78772504Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.788429549Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.78830657Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.788210335Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.788077966Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.787870349Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.787843585Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.787772635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.787797755Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.787645771Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, 
device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.787617332Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.787524775Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.78744481Z caller=remote_instance_store.go:51 user=615392 slug=shinemetrics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Paris" t=2024-05-29T13:44:13.78736447Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Paris" t=2024-05-29T13:44:13.787349933Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.787317424Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.7872634Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m165dqex-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.786832051Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=615392 slug=shinemetrics t=2024-05-29T13:44:13.786967985Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.scheduler user=615392 slug=shinemetrics version=10 fingerprint=dc69b38d8c5a585e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.786810615Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Amsterdam State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Amsterdam Value:0xc055e031f8} B:{Var:B Labels:__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Amsterdam Value:0xc055e03238}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.786173845s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Amsterdam} 
value=1 ], [ var='B' labels={__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Amsterdam} value=0 ]} {Instance:__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Paris State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Paris Value:0xc055e03320} B:{Var:B Labels:__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Paris Value:0xc055e032d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.786191406s EvaluationString:[ var='A' labels={__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Paris} value=1 ], [ var='B' labels={__name__=probe_success, config_version=1715008305429875712, instance=https://api.shine.fr/v2/account_closures/liveness_check, job=Liveness Check account-closures-v2, probe=Paris} value=0 ]}]" duration=12.266927ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.787028139Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.786951683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.786939585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.786857988Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m165dqex-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.78671603Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=620339 slug=energostack instance="datasource_uid=bdjm40q3tf1tse, ref_id=A" t=2024-05-29T13:44:13.786758916Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-m165dqex-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.786689799Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.786709455Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m15fk1l3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.786548398Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0pz97g0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.786432837Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.78635988Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.78634565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.786300904Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:13.786230758Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.786158069Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.78608862Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="resource.label.project_id=shine-163816, resource.type=spanner_instance" t=2024-05-29T13:44:13.786102376Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.786019742Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0jq0zbq-termination-metadata-pv, 
phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.786101363Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.786073617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=83647 slug=bidsolutions instance= t=2024-05-29T13:44:13.786028913Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0jq0zbq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.786073453Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=83647 slug=bidsolutions t=2024-05-29T13:44:13.785990275Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0djr36u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.785995042Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0djr36u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.785889091Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:13.785591652Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=44.579285ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0co3qhq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.785570328Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0anadah-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.785472957Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.78545858Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-m0anadah-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.785341515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzz0uft6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.785218524Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzz0uft6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.785172844Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.785181582Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzwan7rh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.784738879Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzrn0e87-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.784685919Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=60199 slug=wallapop t=2024-05-29T13:44:13.78519925Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzrn0e87-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.784672858Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzrn0e87-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.784573087Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.785188349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.785120841Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzidmh8q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.784142773Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.784935208Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzh1zew3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.78381025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzcf5aet-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.783753979Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lzcf5aet-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.783622358Z level=debug 
msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.784863858Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lz463s0g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.783542227Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lz463s0g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.783492036Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.784817199Z caller=remote_instance_store.go:51 user=554491 slug=safeskyindustries msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lz1lnjt1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.783175813Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.784722225Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lz1asqt5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.783012791Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lz1asqt5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782948851Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.784684052Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyz2qyd3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782643757Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyz2qyd3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782586257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyz2qyd3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782516346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyxoxzo0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782447255Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyxoxzo0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782312084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyxmk5yz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782247903Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyxmk5yz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782196823Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyxmk5yz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782157092Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyxmk5yz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782145592Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyx0r99u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782109682Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyx0r99u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.782006221Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyx0r99u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781992231Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.784529095Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.784365763Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyqv3z6u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.78194224Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyqv3z6u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.78189391Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyqv3z6u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781851689Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.784350406Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyltil0v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781788779Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.784324532Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyltil0v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781752898Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:13.78432009Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.683323ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.784404911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyltil0v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781741018Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyltil0v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781692068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor" 
t=2024-05-29T13:44:13.784286911Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyh9l3hd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781639857Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyh9l3hd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781548136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyh9l3hd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781534746Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyftgg5l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781478856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lybhjsm6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781317284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lybhjsm6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781276553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lybhjsm6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.781224473Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyaa8fpw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781090882Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lyaa8fpw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.781006801Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ly6vrui8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7809656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ly6vrui8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780892589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ly6vrui8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780852069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ly6vrui8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780838579Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.784036795Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxqlv0wp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780737948Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxpzfhck-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780478335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxkfptn3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780420185Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.783892464Z caller=remote_instance_store.go:51 user=526835 slug=fundbot msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.783845052Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxkfptn3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780299693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxkfptn3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780286473Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526835 slug=fundbot instance= t=2024-05-29T13:44:13.783780074Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxib7pig-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780181052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.783819178Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxib7pig-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780101951Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxi76l63-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.780038241Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.783711069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.783682013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxh0tsht-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.779811258Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxh0tsht-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.779794538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxh0tsht-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.779754588Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.783419868Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.783302241Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.783209534Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.783160422Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.783089114Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.783037408Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.782955215Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.782875378Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.782788179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.782688002Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.782580455Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.782351485Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.782451861Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.782097984Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=608555 slug=ias t=2024-05-29T13:44:13.781857151Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:13.781806388Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.781548076Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.781358652Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.78798ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.781391603Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.781278002Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.084416ms + level=debug ts=2024-05-29T13:44:13.781239177Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.781166065Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.781052724Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.781091878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.780713651Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.780556787Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:13.780519877Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=159.225737ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.780510418Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.780271167Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.780226002Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.780154551Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.780133061Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.780068648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.779780878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.779513999Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.779432598Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.779336378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.778969982Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.778577653Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.778563887Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.779091657Z caller=remote_instance_store.go:51 user=890273 slug=cmhusqnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.779054786Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxc18eok-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7789722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxc18eok-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.778866219Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lxc18eok-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.778700917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx9b3zvh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.778602206Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.778713707Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.778350264Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx7bkk7m-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.778399254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx7bkk7m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.778311783Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx4uzgoe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.778272013Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.778263457Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx2f8wgy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77800371Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx2f8wgy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777958349Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.778100191Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx2f8wgy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777937569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.me1.mel, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.77801161Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx1jnpi3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777900269Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx1jnpi3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777790248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lx1jnpi3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777696677Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.me1.mel, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.777732907Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.777724605Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.777677817Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.777668257Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.me1.mel, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.777652006Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.777595119Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.701121ms + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.me1.mel, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.777637732Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lwtmd5kx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777617216Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lwtmd5kx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777583966Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.me1.mel, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.777564209Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.77746634Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.me1.mel, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.777480595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:13.777392256Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=31.291033ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lwojr1d0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777342523Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.777326823Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lwojr1d0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.777299083Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.777350585Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.777309478Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=md2.mad, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.md2.mad, 
origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.77724357Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.777245809Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.777121247Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.777031668Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.776948333Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=438185 slug=nodeinfra t=2024-05-29T13:44:13.776968213Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=53.991404ms
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=md2.mad, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.md2.mad, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776974851Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:13.776863649Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.776934914Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.776891058Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=ddhkbrfewv7k0d, ref_id=A" t=2024-05-29T13:44:13.776827638Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=md2.mad, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.md2.mad, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776909826Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=md2.mad, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.md2.mad, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776895932Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lwk6scjo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.776779867Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=md2.mad, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.md2.mad, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776729816Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lwdrmkwg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.776710587Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=md2.mad, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.md2.mad, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776715712Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.776746939Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lwdrmkwg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.776558905Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.776510184Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.776326579Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lw8knwz2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.776278922Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.mb2.mum, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.77621372Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lw8knwz2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.776249682Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lw8knwz2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.776190661Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lw80a201-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77606465Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.mb2.mum, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776140796Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.mb2.mum, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776130238Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lw80a201-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77602034Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.mb2.mum, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776083338Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.mb2.mum, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.776074088Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvzgshse-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775965059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvzgshse-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775878938Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvzgshse-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775855648Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ls1.lis, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775925473Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ls1.lis, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775850833Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.775588824Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.775594716Z caller=remote_instance_store.go:51 user=60199 slug=wallapop msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.775560756Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvyqenag-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775587555Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvyqenag-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775559445Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvy49anr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775488774Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=698119 slug=simonsprod t=2024-05-29T13:44:13.77541431Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.092991ms
+ logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:13.775459155Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ls1.lis, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775518338Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ls1.lis, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775507465Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvy49anr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775421023Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ls1.lis, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775399904Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvy49anr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.775373043Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ls1.lis, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775281555Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.775127901Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ld6.lon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ld6.lon, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775195656Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.775123982Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ld6.lon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ld6.lon, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.775097437Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ld6.lon, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ld6.lon, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.774973294Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvwtv1ry-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.774930408Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvv39g78-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.774576305Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.774519242Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ld6.lon, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ld6.lon, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.774507986Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvn7a6i2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.774440443Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.hk2.hkg, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.774438484Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.hk2.hkg, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.774319034Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.774263886Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvlhpw1k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.774210701Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvlhpw1k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77414366Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.hk2.hkg, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.774160083Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.hk2.hkg, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.774147963Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.hk2.hkg, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.774058014Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvhvyq25-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.773990189Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvhvyq25-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.773965028Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.773917459Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.hk2.hkg, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.773950622Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.773804629Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.773748538Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.773706988Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.773756997Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.773584474Z caller=remote_instance_store.go:51 user=884866 slug=cnonumerique msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvgknmh3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.773690216Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.773613826Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.77362916Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvftll6p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.773571324Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.773541603Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lvftll6p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.773545414Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=HONG KONG Query" t=2024-05-29T13:44:13.773306011Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lv91wlsd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.773345732Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=714711 slug=nomiai t=2024-05-29T13:44:13.773298132Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=714711 slug=nomiai instance="service=text-to-speech-female" t=2024-05-29T13:44:13.773249291Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.773262689Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lv91wlsd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.773194521Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.773208487Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lv91wlsd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77316613Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.773167989Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=d045d39e090e84b2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.773103059Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=HONG KONG Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc07609de78} Threshold:{Var:Threshold Labels: Value:0xc07609deb0} compare:{Var:compare Labels:aggregatedBy=sum, name=HONG KONG Query Value:0xc07609df20} sum:{Var:sum Labels:aggregatedBy=sum, name=HONG KONG Query Value:0xc07609df58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.772765153s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=1 ], [ var='compare' labels={aggregatedBy=sum, name=HONG KONG Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=HONG KONG Query} value=17 ]}]" duration=24.807334ms
+ logger=ngalert.state.manager user=475799 slug=dpdcz t=2024-05-29T13:44:13.773096946Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luvd268k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77312471Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.773081201Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.77305021Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.773012263Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.773031354Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv67xga-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772883257Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv67xga-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772853057Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772858419Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.772800834Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772773581Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.772769628Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv67xga-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772758406Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv63ii3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772699556Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv63ii3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772683345Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772691072Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.772649131Z caller=remote_instance_store.go:51 user=355429 slug=zenpli msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772679558Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv63ii3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772610755Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772609571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv63ii3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772578884Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-luv63ii3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772554314Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lup7odqf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772467383Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lup7odqf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772392152Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.772488226Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772526532Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772518229Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.772435321Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.77245083Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772437917Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=231061 slug=teamaround t=2024-05-29T13:44:13.772398806Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=231061 slug=teamaround instance= t=2024-05-29T13:44:13.772387086Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772340577Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lufh86w6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772294771Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lufh86w6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772267381Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=231061 slug=teamaround t=2024-05-29T13:44:13.77226545Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lufh86w6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77219249Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772226212Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772214238Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lu1wl752-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.772045699Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772144195Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltsl0deh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771991358Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772132586Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.771993579Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.772061161Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.771972714Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltsl0deh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771893757Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltsl0deh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771844127Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.771869287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.771800175Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.771780448Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltrjt5qm-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771750816Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.771683274Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltphy3m3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771543204Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltphy3m3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771515003Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.771523203Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltphy3m3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771393632Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.771402566Z caller=remote_instance_store.go:51 user=767797 slug=mgmresorts msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.771337891Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltl6q0ez-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771320821Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltl6q0ez-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771303681Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltl6q0ez-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771246631Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=767797 slug=mgmresorts version=3 fingerprint=fa35a9c5dbac64f7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.771201644Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.770739201s EvaluationString:}]" duration=28.994789ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltl6q0ez-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77121194Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltl6q0ez-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.77118436Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.771046869Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lti2qz7z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771121499Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lti2qz7z-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.771056499Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.770970907Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.771078819Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.77102594Z caller=remote_instance_store.go:51 user=662362 slug=imsafu msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltb3ti71-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.770873167Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.770962416Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.770717025Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.770614069Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltb3ti71-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.770630924Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltb3ti71-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.770569774Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.77060716Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.770453605Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.770508571Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ltacqx4f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.770277561Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.770224027Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.770213404Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.770137484Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.770127167Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.770092092Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsxts1fk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.770073099Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsxts1fk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.770022088Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.769965064Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsxts1fk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769835406Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lswzfu62-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769719735Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.769781282Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.76970078Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lswzfu62-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769594474Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lswzfu62-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769582214Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.769648962Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor" t=2024-05-29T13:44:13.769462518Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:13.769488573Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lstqfurb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769431142Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lstqfurb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769390112Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=806229 slug=simplisafe version=14 fingerprint=a459ae816869d8fc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.769373432Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.768984202s EvaluationString:}]" duration=16.840184ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lstqfurb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769359841Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsp3v4vx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769317571Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=343338 slug=f5sdc version=2 fingerprint=a7bfd50cf1753600 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.761892077Z level=debug msg="Alert rule evaluated" results="[{Instance:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6990} B:{Var:B Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be68d0} C:{Var:C Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6928}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747218127s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6ac8} B:{Var:B Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6a18} C:{Var:C Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6a80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747235672s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6cf0} B:{Var:B Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6d58} C:{Var:C Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6b58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747243376s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6e18} B:{Var:B Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6e58} C:{Var:C Labels:datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6e98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747254343s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7098} B:{Var:B Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be6ff8} C:{Var:C Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747262769s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7218} B:{Var:B Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be72a8} C:{Var:C Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747269961s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be73b0} B:{Var:B Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7400} C:{Var:C Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7448}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747276401s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be77f8} B:{Var:B Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be76a0} C:{Var:C Labels:datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be76e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747282329s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=eno6, group=hypervisors-prod-proxmox,
instance=hyprox-neteng-rr-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7c98} B:{Var:B Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6040} C:{Var:C Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc020be7908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747288671s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc60d8} B:{Var:B Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6118} C:{Var:C Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6160}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747296391s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6220} B:{Var:B Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6278} C:{Var:C Labels:datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc62b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747303084s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} 
value=0 ], [ var='B' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6358} B:{Var:B Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc63a0} C:{Var:C Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc63e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747308963s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc64d0} B:{Var:B Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6578} C:{Var:C Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc65e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74731452s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc66e0} B:{Var:B Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor Value:0xc049fc6720} C:{Var:C Labels:datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor 
Value:0xc049fc6778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747321862s EvaluationString:[ var='A' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ams9.ams, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.ams9.ams, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6818} B:{Var:B Labels:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6860} C:{Var:C Labels:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc68a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747327708s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc69e0} B:{Var:B Labels:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6950} C:{Var:C Labels:datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6998}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74733342s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6a80} B:{Var:B Labels:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, 
origin=volterra-infra-hypervisor Value:0xc049fc6ac8} C:{Var:C Labels:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6b18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747339521s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6bb0} B:{Var:B Labels:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6bf8} C:{Var:C Labels:datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6c38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747355426s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6dd8} B:{Var:B Labels:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6d50} C:{Var:C Labels:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6d98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.7473608s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, 
instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6e80} B:{Var:B Labels:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6ec8} C:{Var:C Labels:datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6f08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747367552s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc7030} B:{Var:B Labels:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6fa8} C:{Var:C Labels:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc6fe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747373797s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc70d8} B:{Var:B Labels:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc7118} C:{Var:C Labels:datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor Value:0xc049fc7158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747379246s EvaluationString:[ var='A' labels={datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dal3.dal, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dal3.dal, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=eno5, 
group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7268} B:{Var:B Labels:datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc71e0} C:{Var:C Labels:datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7228}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747386371s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc72e8} B:{Var:B Labels:datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7330} C:{Var:C Labels:datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747392191s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dc12.ash, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7400} B:{Var:B Labels:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7440} C:{Var:C Labels:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747397641s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' 
labels={datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7510} B:{Var:B Labels:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7558} C:{Var:C Labels:datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc75a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747402528s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dc12.ash, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7650} B:{Var:B Labels:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7698} C:{Var:C Labels:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc76e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74740883s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7768} B:{Var:B Labels:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc77b0} C:{Var:C Labels:datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc77f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747415249s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, 
origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dc12.ash, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7880} B:{Var:B Labels:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc78c8} C:{Var:C Labels:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747423215s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7990} B:{Var:B Labels:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc79d8} C:{Var:C Labels:datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor Value:0xc049fc7a18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747429151s EvaluationString:[ var='A' labels={datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dc12.ash, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dc12.ash, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7aa0} B:{Var:B Labels:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7ae8} C:{Var:C Labels:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7b28}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74743615s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7c48} B:{Var:B Labels:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7bb8} C:{Var:C Labels:datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74744206s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7cd8} B:{Var:B Labels:datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7d20} C:{Var:C Labels:datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7d68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74744779s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7e38} B:{Var:B Labels:datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7e80} C:{Var:C Labels:datacenter=dx1.dxb, 
device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74745288s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7f40} B:{Var:B Labels:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7f98} C:{Var:C Labels:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc049fc7fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747471747s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc021050098} B:{Var:B Labels:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc0210500f0} C:{Var:C Labels:datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc021050158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747477552s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc0210502e8} B:{Var:B Labels:datacenter=dx1.dxb, 
device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc021050220} C:{Var:C Labels:datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc021050268}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747482678s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc0210503c0} B:{Var:B Labels:datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc021050408} C:{Var:C Labels:datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor Value:0xc021050448}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747487882s EvaluationString:[ var='A' labels={datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=dx1.dxb, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.dx1.dxb, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050548} B:{Var:B Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc0210504c8} C:{Var:C Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747493147s EvaluationString:[ var='A' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, 
device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor Value:0xc0210505f8} B:{Var:B Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050638} C:{Var:C Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747498052s EvaluationString:[ var='A' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050718} B:{Var:B Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050760} C:{Var:C Labels:datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc0210507b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747513825s EvaluationString:[ var='A' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=fr4.fra, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050848} B:{Var:B Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050888} C:{Var:C Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc0210508c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747520988s EvaluationString:[ var='A' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=fr4.fra, 
device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor Value:0xc0210509b8} B:{Var:B Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050a10} C:{Var:C Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747526544s EvaluationString:[ var='A' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.fr4.fra, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050b48} B:{Var:B Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050ac0} C:{Var:C Labels:datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050b08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74753457s EvaluationString:[ var='A' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=fr4.fra, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050c90} B:{Var:B Labels:datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050cd8} C:{Var:C Labels:datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor Value:0xc021050c38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.747540278s EvaluationString:[ var='A' labels={datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ], 
[ var='C' labels={datacenter=fr4.fra, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.fr4.fra, origin=volterra-infra-hypervisor} value=0 ]} [… dozens of further evaluation entries omitted: identical State:Normal results (vars A, B, and C all value=0, EvaluatedAt:2024-05-29 13:44:10 +0000 UTC) for group=hypervisors-prod-proxmox instances hyprox-01/hyprox-02 (plus hyprox-neteng-01 in fr4.fra) across datacenters fr4.fra, hk2.hkg, ld6.lon, ls1.lis, mb2.mum, md2.mad, me1.mel, and mtl7.mon on devices eno5, eno6, ens1p0, and ens1p1 …] {Instance:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon, origin=volterra-infra-hypervisor Value:0xc0215389b0} B:{Var:B Labels:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon, origin=volterra-infra-hypervisor Value:0xc0215389f8} C:{Var:C Labels:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon, origin=volterra-infra-hypervisor Value:0xc021538a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748037911s EvaluationString:[ var='A' labels={datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.mtl7.mon,
origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor Value:0xc021538ad0} B:{Var:B Labels:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor Value:0xc021538b18} C:{Var:C Labels:datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor Value:0xc021538c48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74804833s EvaluationString:[ var='A' labels={datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=mtl7.mon, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.mtl7.mon, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538d48} B:{Var:B Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538cc8} C:{Var:C Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538d08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748058777s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538dd0} B:{Var:B Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538e18} C:{Var:C Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538e58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748068302s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, 
origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538f60} B:{Var:B Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538ed8} C:{Var:C Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538f18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748077744s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021538ff0} B:{Var:B Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539038} C:{Var:C Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748086994s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc0215390f8} B:{Var:B Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539140} C:{Var:C Labels:datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor 
Value:0xc021539270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748095567s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc0215396b8} B:{Var:B Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc0215397b0} C:{Var:C Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539828}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748106682s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539ae0} B:{Var:B Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc0215398e8} C:{Var:C Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc0215399f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748116996s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539da8} B:{Var:B Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, 
origin=volterra-infra-hypervisor Value:0xc021539e20} C:{Var:C Labels:datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539b70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748127612s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539ed8} B:{Var:B Labels:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539f20} C:{Var:C Labels:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc021539fd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748138117s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-01.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc01fd6e0b0} B:{Var:B Labels:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc01fd6e118} C:{Var:C Labels:datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor Value:0xc01fd6e170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748151827s EvaluationString:[ var='A' labels={datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=ny2.nyc, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-corero-02.ny2.nyc, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=eno5, 
group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e250} B:{Var:B Labels:datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e298} C:{Var:C Labels:datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748161314s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e438} B:{Var:B Labels:datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e490} C:{Var:C Labels:datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748171515s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e600} B:{Var:B Labels:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e658} C:{Var:C Labels:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e6d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748180235s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=eno6, 
group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e818} B:{Var:B Labels:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e878} C:{Var:C Labels:datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e7a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748186167s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e950} B:{Var:B Labels:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6e9a8} C:{Var:C Labels:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ea18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74819202s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ead0} B:{Var:B Labels:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6eb18} C:{Var:C Labels:datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6eb80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748198182s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=ens1p0, 
group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ec98} B:{Var:B Labels:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ece0} C:{Var:C Labels:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ec30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748204236s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ed98} B:{Var:B Labels:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ede8} C:{Var:C Labels:datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor Value:0xc01fd6ee68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748210915s EvaluationString:[ var='A' labels={datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=os1.osa, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.os1.osa, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6ef88} B:{Var:B Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6efd8} C:{Var:C Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6ef38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748216579s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno5, 
group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f150} B:{Var:B Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f190} C:{Var:C Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f0a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748222669s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f348} B:{Var:B Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f390} C:{Var:C Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74822831s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f578} B:{Var:B Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f468} C:{Var:C Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor 
Value:0xc01fd6f4e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748234862s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f628} B:{Var:B Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f6a0} C:{Var:C Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f738}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748241281s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f8d8} B:{Var:B Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f808} C:{Var:C Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748246871s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6f9d0} B:{Var:B Labels:datacenter=pa2.par, 
device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fa28} C:{Var:C Labels:datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fa80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748254786s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fbd8} B:{Var:B Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fc38} C:{Var:C Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fb70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748275722s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fd78} B:{Var:B Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fe10} C:{Var:C Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fd20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748284464s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6fee0} B:{Var:B Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6ff38} C:{Var:C Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc01fd6ffa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748293308s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f383e8} B:{Var:B Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f38f40} C:{Var:C Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f39048}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748304487s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f39a78} B:{Var:B Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f39260} C:{Var:C Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f396f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748314898s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, 
origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f39e00} B:{Var:B Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc011f39fa8} C:{Var:C Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748326212s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f01b0} B:{Var:B Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f01f8} C:{Var:C Labels:datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0238}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748337524s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f02b8} B:{Var:B Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0300} C:{Var:C Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0348}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748347433s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' 
labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f03d8} B:{Var:B Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0418} C:{Var:C Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74835712s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0748} B:{Var:B Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0790} C:{Var:C Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f07d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748367864s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f09c0} B:{Var:B Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0a08} C:{Var:C Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0978}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.748376681s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0a88} B:{Var:B Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0ac8} C:{Var:C Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0b08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748388229s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0b98} B:{Var:B Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0d08} C:{Var:C Labels:datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0d48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748395192s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0f88} B:{Var:B Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, 
instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0dc8} C:{Var:C Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f0f40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74840151s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1018} B:{Var:B Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1188} C:{Var:C Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f11d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748410409s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1338} B:{Var:B Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1278} C:{Var:C Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f12f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748419075s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p1, 
group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f17f8} B:{Var:B Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f13c8} C:{Var:C Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f17b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748427731s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f18c8} B:{Var:B Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1908} C:{Var:C Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1948}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748438788s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1db8} B:{Var:B Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1e00} C:{Var:C Labels:datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor Value:0xc0127f1d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748447698s EvaluationString:[ var='A' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa2.par, device=ens1p1, group=hypervisors-prod-proxmox, 
instance=hyprox-neteng-03.pa2.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a008} B:{Var:B Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a200} C:{Var:C Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748457218s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a2d8} B:{Var:B Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a320} C:{Var:C Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748465927s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a500} B:{Var:B Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a540} C:{Var:C Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a588}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748476051s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, 
instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a908} B:{Var:B Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a610} C:{Var:C Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325a680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748485759s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325ab38} B:{Var:B Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325ae70} C:{Var:C Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325aeb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748495951s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b170} B:{Var:B Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b1b8} C:{Var:C Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b1f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748506668s 
EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b4a8} B:{Var:B Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b4e8} C:{Var:C Labels:datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748516964s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b5f8} B:{Var:B Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325bb40} C:{Var:C Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325b5b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748528166s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc00fa5c610} B:{Var:B Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc02325bbd0} C:{Var:C 
Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc00fa5c4e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748536569s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc00fa5d688} B:{Var:B Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc00fa5d2d0} C:{Var:C Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc00fa5d3c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74854281s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc00fa5dc48} B:{Var:B Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738020} C:{Var:C Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748550485s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor 
Value:0xc0337380f0} B:{Var:B Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738138} C:{Var:C Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748558044s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738230} B:{Var:B Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738278} C:{Var:C Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337382c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748565272s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738360} B:{Var:B Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337383a8} C:{Var:C Labels:datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748572881s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, 
device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738490} B:{Var:B Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337384d0} C:{Var:C Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748582874s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337385c8} B:{Var:B Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738610} C:{Var:C Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738658}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748591283s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337386e8} B:{Var:B Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738740} C:{Var:C Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748600702s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' 
labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738820} B:{Var:B Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738868} C:{Var:C Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337388a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748610773s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738998} B:{Var:B Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337389e0} C:{Var:C Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748621911s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738a70} B:{Var:B Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738ac8} C:{Var:C Labels:datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738b10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748631911s EvaluationString:[ var='A' 
labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738c40} B:{Var:B Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738bb0} C:{Var:C Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738bf8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748641456s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738cd8} B:{Var:B Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738d30} C:{Var:C Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74866012s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738e10} B:{Var:B Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738e58} C:{Var:C Labels:datacenter=pa4.par, device=ens1p1, 
group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738ea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748669727s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738f28} B:{Var:B Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738f70} C:{Var:C Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor Value:0xc033738fb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748678857s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-01.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337390d0} B:{Var:B Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033739040} C:{Var:C Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor Value:0xc033739088}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748687634s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-02.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, 
instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337391b0} B:{Var:B Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc0337391f8} C:{Var:C Labels:datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor Value:0xc033739168}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748696837s EvaluationString:[ var='A' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=pa4.par, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-neteng-03.pa4.par, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739280} B:{Var:B Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc0337392c8} C:{Var:C Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739308}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748707152s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739388} B:{Var:B Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc0337393d0} C:{Var:C Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739418}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748730269s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, 
instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc0337394a0} B:{Var:B Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc0337394f8} C:{Var:C Labels:datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739558}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748740449s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc0337395e0} B:{Var:B Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739628} C:{Var:C Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739668}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748753433s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc0337396f0} B:{Var:B Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739738} C:{Var:C Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74877103s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' 
labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739898} B:{Var:B Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739808} C:{Var:C Labels:datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739858}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748781868s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-neteng-rr-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc0337399b8} B:{Var:B Labels:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739928} C:{Var:C Labels:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748790243s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739ab8} B:{Var:B Labels:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739a38} C:{Var:C Labels:datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748796743s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, 
instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739bb8} B:{Var:B Labels:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739b38} C:{Var:C Labels:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739b78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748803638s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739c40} B:{Var:B Labels:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739d38} C:{Var:C Labels:datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor Value:0xc033739d78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74880991s EvaluationString:[ var='A' labels={datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sg3.sin, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sg3.sin, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc033739e00} B:{Var:B Labels:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc033739e48} C:{Var:C Labels:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc033739e98}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748816195s EvaluationString:[ var='A' labels={datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc033739f38} B:{Var:B Labels:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc033739f78} C:{Var:C Labels:datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc033739fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748821628s EvaluationString:[ var='A' labels={datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206128} B:{Var:B Labels:datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206098} C:{Var:C Labels:datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e2060d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748826827s EvaluationString:[ var='A' labels={datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206268} B:{Var:B Labels:datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e2062c8} C:{Var:C Labels:datacenter=sif.che, 
device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748832239s EvaluationString:[ var='A' labels={datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206448} B:{Var:B Labels:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e2064d8} C:{Var:C Labels:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748838633s EvaluationString:[ var='A' labels={datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e2066c0} B:{Var:B Labels:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e2065a0} C:{Var:C Labels:datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206638}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748845101s EvaluationString:[ var='A' labels={datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e2069c8} B:{Var:B Labels:datacenter=sif.che, 
device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206a58} C:{Var:C Labels:datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206a98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748850478s EvaluationString:[ var='A' labels={datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206bc0} B:{Var:B Labels:datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206c08} C:{Var:C Labels:datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor Value:0xc01e206c98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748855835s EvaluationString:[ var='A' labels={datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sif.che, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sif.che, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e206ea8} B:{Var:B Labels:datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e206dd0} C:{Var:C Labels:datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e206e18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74886096s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=eno5, 
group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e2070b8} B:{Var:B Labels:datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e206f80} C:{Var:C Labels:datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207028}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748867612s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207148} B:{Var:B Labels:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207188} C:{Var:C Labels:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e2071d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74887452s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e2072e8} B:{Var:B Labels:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207328} C:{Var:C Labels:datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207290}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748880812s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, 
origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e2073e0} B:{Var:B Labels:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207428} C:{Var:C Labels:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748886511s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207538} B:{Var:B Labels:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207580} C:{Var:C Labels:datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e2074f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748906534s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207698} B:{Var:B Labels:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207608} C:{Var:C Labels:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748917188s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, 
instance=hyprox-01.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207798} B:{Var:B Labels:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207718} C:{Var:C Labels:datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor Value:0xc01e207758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748927823s EvaluationString:[ var='A' labels={datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sp4.sao, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-02.sp4.sao, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e2078b8} B:{Var:B Labels:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207828} C:{Var:C Labels:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74893814s EvaluationString:[ var='A' labels={datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e2079c8} B:{Var:B Labels:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207938} C:{Var:C Labels:datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748947871s EvaluationString:[ var='A' labels={datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sto6.sto, device=eno5, 
group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sto6.sto, device=eno5, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207a98} B:{Var:B Labels:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207ae0} C:{Var:C Labels:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207b28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748956108s EvaluationString:[ var='A' labels={datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207c38} B:{Var:B Labels:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207ba8} C:{Var:C Labels:datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207bf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748965853s EvaluationString:[ var='A' labels={datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sto6.sto, device=eno6, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207d08} B:{Var:B Labels:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207d48} C:{Var:C Labels:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207cc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74897492s EvaluationString:[ var='A' 
labels={datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207ec0} B:{Var:B Labels:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207f48} C:{Var:C Labels:datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor Value:0xc01e207fc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748985713s EvaluationString:[ var='A' labels={datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='C' labels={datacenter=sto6.sto, device=ens1p0, group=hypervisors-prod-proxmox, instance=hyprox-02.sto6.sto, origin=volterra-infra-hypervisor} value=0 ]} {Instance:datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc035afe080} B:{Var:B Labels:datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc035afe0c8} C:{Var:C Labels:datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor Value:0xc035afe040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.748995984s EvaluationString:[ var='A' labels={datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value=0 ], [ var='B' labels={datacenter=sto6.sto, device=ens1p1, group=hypervisors-prod-proxmox, instance=hyprox-01.sto6.sto, origin=volterra-infra-hypervisor} value
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsp3v4vx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769287311Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.769176552Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsp3v4vx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.76921253Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsp3v4vx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.769167639Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.768987186Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsjst4gg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.768808686Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=840286 slug=y0h0h0 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.768606125Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=840286 slug=y0h0h0 version=6 fingerprint=93075b62e8af20f4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.768500478Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.76816516s EvaluationString:}]" duration=11.794229ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lshhnd70-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.768541133Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lshhnd70-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.768467572Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.768428Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.768383011Z caller=remote_instance_store.go:51 user=698119 slug=simonsprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.768399126Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=698119 slug=simonsprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.768299159Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=698119 slug=simonsprod t=2024-05-29T13:44:13.768275419Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=242310 slug=suzy instance= t=2024-05-29T13:44:13.768287339Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy instance= t=2024-05-29T13:44:13.768265196Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=698119 slug=simonsprod version=1 fingerprint=0d2b5380a70905c8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.768214788Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.767916337s EvaluationString:}]" duration=6.862036ms
+ level=debug ts=2024-05-29T13:44:13.768256016Z caller=remote_rule_evaluator.go:193 user=403369 slug=clearsaletechlabs msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsfn9jhk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7682429Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsfn9jhk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.76821242Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:13.768157026Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsctpi6w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.768134849Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lsctpi6w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.768093358Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=242310 slug=suzy t=2024-05-29T13:44:13.768053257Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ls441hfb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767939947Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.767870844Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ls43izck-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767758525Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ls43izck-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767693834Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrs0yefp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767564983Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrs0yefp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767467532Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrh5z66g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767361421Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrh5z66g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767347211Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrh5z66g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.76727941Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.767308502Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrh5z66g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.767213839Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=342039 slug=criblcloud t=2024-05-29T13:44:13.767005056Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=342039 slug=criblcloud instance= t=2024-05-29T13:44:13.766975197Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=342039 slug=criblcloud version=6 fingerprint=35f95102c272c8d8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.766814907Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.7664937s EvaluationString:}]" duration=67.521895ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrcfx204-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766834165Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrcfx204-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766713984Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrbfgccg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766667954Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrbfgccg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766640103Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.766678101Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.766669486Z caller=remote_instance_store.go:51 user=848777 slug=opsalert msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.766607124Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.766607162Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node26 - 35.214.1.71, job=node-exporter, metrics_node_id=27, node_id=26" t=2024-05-29T13:44:13.766507387Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node26 - 35.214.1.71, job=node-exporter, metrics_node_id=27, node_id=26" t=2024-05-29T13:44:13.766499733Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lrbfgccg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766537182Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node23 - 35.215.85.221, job=node-exporter, metrics_node_id=24, node_id=23" t=2024-05-29T13:44:13.766091684Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lr526uk1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766259409Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lr526uk1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766151268Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lr526uk1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.766120478Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqwcq9nk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.765768074Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.765747432Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqwcq9nk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.765693114Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.765537629Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqresedd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.765478211Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqresedd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.765446041Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.76538669Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.765276573Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node17 - 35.213.204.46, job=node-exporter, metrics_node_id=18, node_id=17" t=2024-05-29T13:44:13.7652891Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.765257657Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ level=debug ts=2024-05-29T13:44:13.765130589Z caller=remote_instance_store.go:51 user=374423 slug=bitburst msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node16 - 35.215.219.90, job=node-exporter, metrics_node_id=17, node_id=16" t=2024-05-29T13:44:13.765153254Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqrb8y60-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.765147448Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqlnwsqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.764905845Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.764961969Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqlnwsqr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.764872835Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.764775226Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.764896286Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.764775227Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqlnwsqr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.764835185Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node14 - 69.67.150.139, job=node-exporter, metrics_node_id=15, node_id=14" t=2024-05-29T13:44:13.764838599Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node14 - 69.67.150.139, job=node-exporter, metrics_node_id=15, node_id=14" t=2024-05-29T13:44:13.764832674Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.764766277Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node13 - 35.214.254.95, job=node-exporter, metrics_node_id=14, node_id=13" t=2024-05-29T13:44:13.76472426Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.764704914Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqkya8it-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.764619582Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.764557745Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqkya8it-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.764537222Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.764312286Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.764434279Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:13.764410017Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious" t=2024-05-29T13:44:13.764252795Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.764227351Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=5535eec87786e737 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.764182091Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.763895431s EvaluationString:}]" duration=52.455828ms
+ level=debug ts=2024-05-29T13:44:13.76410233Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.764103808Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ level=debug ts=2024-05-29T13:44:13.764055358Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=87780 slug=zencloudandhosting instance="datasource_uid=000000020, ref_id=A,B" t=2024-05-29T13:44:13.763839685Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbzgktq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763957696Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbzgktq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763930145Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbzgktq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763862965Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.763814158Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ level=debug ts=2024-05-29T13:44:13.763767169Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.763664945Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.763739032Z caller=remote_instance_store.go:51 user=765907 slug=orangebarrelmedia msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.763674741Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbytrj8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763703253Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=174675 slug=journalprod instance="datasource_uid=uF2hBHyGz, ref_id=A" t=2024-05-29T13:44:13.763685015Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbytrj8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763549021Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=174675 slug=journalprod instance="datasource_uid=uF2hBHyGz, ref_id=A" t=2024-05-29T13:44:13.763652116Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.763520891Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbytrj8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763531951Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbt37m2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763493211Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.763162133Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=714577 slug=readypactest t=2024-05-29T13:44:13.76309492Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lqbei0a7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.763077106Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=performance, instance=localhost:9100, instance_type=hedera-node, inventory_name=node03 - 35.213.128.79, job=node-exporter, metrics_node_id=4, node_id=3" t=2024-05-29T13:44:13.763117545Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lq9crpzq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.762992756Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=714577 slug=readypactest instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.763008158Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.763055389Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.762778692Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=performance"
+ level=debug ts=2024-05-29T13:44:13.762677763Z caller=remote_instance_store.go:51 user=173374 slug=felmo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lq87wh9y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.762579841Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=173374 slug=felmo instance= t=2024-05-29T13:44:13.762625423Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.762559968Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=173374 slug=felmo version=93 fingerprint=5b0a073bbf890886 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.762460586Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.761666024s EvaluationString:}]" duration=475.840727ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lprvwjq1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.76247265Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.762416164Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lprvwjq1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.762322799Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.762288009Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=integration-docker"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpp1man5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.762188657Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.762132966Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=integration-docker"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpp1man5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.762123467Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpov2pfx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.762023926Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=integration-docker, instance=localhost:9100, instance_type=hedera-node, inventory_name=node03 - 34.127.65.96, job=node-exporter, metrics_node_id=4, node_id=3" t=2024-05-29T13:44:13.762011259Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.761928468Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpov2pfx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761984975Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.761922898Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.761731897Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpkdc3u8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761844044Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.761766463Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpkdc3u8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761763163Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.761682637Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="environment=integration-docker" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpg2zjht-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761617421Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpg2zjht-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761585691Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.761573636Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=integration-docker, instance=localhost:9100, instance_type=hedera-node, inventory_name=node00 - 34.138.249.218, job=node-exporter, metrics_node_id=1, node_id=0" t=2024-05-29T13:44:13.76157483Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lpg2zjht-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.76151754Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=integration, instance=localhost:9100, instance_type=hedera-node, inventory_name=node06 - 34.125.38.151, job=node-exporter, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:13.76142192Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.761336278Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=integration, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 34.106.94.135, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:13.761263527Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.761237285Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=integration" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=integration, instance=localhost:9100, instance_type=hedera-node, inventory_name=node04 - 34.94.112.161, job=node-exporter, metrics_node_id=5, node_id=4" t=2024-05-29T13:44:13.761123424Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.761085462Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=integration" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lp2ylu9d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761363619Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lp2ylu9d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761336019Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.761335688Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lp2ylu9d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761268108Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loy4jmsz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.761127276Z level=debug msg="Setting next state" handler=resultNormal + Error parsing panelUID for alert annotationruleID2267dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.76117858Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=44.631962ms + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=integration, instance=localhost:9100, instance_type=hedera-node, inventory_name=node03 - 35.203.145.150, job=node-exporter, metrics_node_id=4, node_id=3" t=2024-05-29T13:44:13.760968482Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loy4jmsz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760988925Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.760948427Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lov2raba-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760923014Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lov2raba-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760890714Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.760861623Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.76084615Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lov2raba-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760844393Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lov2raba-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760774453Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.760762427Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lor4fgq5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760625751Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.760548941Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=integration, instance=localhost:9100, instance_type=hedera-node, inventory_name=node01 - 35.245.91.14, job=node-exporter, metrics_node_id=2, node_id=1" t=2024-05-29T13:44:13.760547749Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.760487108Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=integration" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.760314163Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=integration" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lopdei5d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760314268Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.760210457Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loibslz4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760156926Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loibslz4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.760086266Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.760129382Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loi5ohob-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759937224Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node30 - 35.213.160.82, job=node-exporter, metrics_node_id=31, node_id=30" t=2024-05-29T13:44:13.760042635Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.760010969Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2" + level=debug ts=2024-05-29T13:44:13.759997288Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.759890972Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.759838259Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loi5ohob-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759871254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loi5ohob-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759760362Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node27 - 35.212.125.100, job=node-exporter, metrics_node_id=28, node_id=27" t=2024-05-29T13:44:13.759624265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lohqbwa5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759580781Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lohqbwa5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75954952Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lognnk2h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759477649Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node26 - 35.207.87.37, job=node-exporter, metrics_node_id=27, node_id=26" t=2024-05-29T13:44:13.759460081Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lognnk2h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759434919Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lognnk2h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759363078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lognnk2h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759335178Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loekztfo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759219917Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.759164647Z caller=remote_instance_store.go:51 user=848777 slug=opsalert msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-loekztfo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.759112066Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=pgw-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.60.141:9090, job=prometheus-k8s, k8s_cluster=pgw-01, kubernetes_cluster=pgw-01, namespace=monitoring, pod=prom-agent-k8s-1, prometheus=monitoring/k8s, prometheus_shard=pgw-01-0, service=prometheus-k8s" t=2024-05-29T13:44:13.759053904Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=848777 slug=opsalert instance="__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=NorthVirginia" t=2024-05-29T13:44:13.759067877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo8nu7w9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758995925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=pgw-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.14.77:9090, job=prometheus-k8s, k8s_cluster=pgw-01, kubernetes_cluster=pgw-01, namespace=monitoring, pod=prom-agent-k8s-0, prometheus=monitoring/k8s, prometheus_shard=pgw-01-0, service=prometheus-k8s" t=2024-05-29T13:44:13.758939622Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=pgw-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.14.77:9090, job=prometheus-k8s, k8s_cluster=pgw-01, kubernetes_cluster=pgw-01, namespace=monitoring, pod=prom-agent-k8s-0, prometheus=monitoring/k8s, prometheus_shard=pgw-01-0, service=prometheus-k8s" t=2024-05-29T13:44:13.758923462Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo5l4mn6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758812383Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=848777 slug=opsalert version=96 fingerprint=6f16726bc10de885 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.758751863Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=Bangalore State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=Bangalore Value:0xc00f07bd10} C:{Var:C Labels:__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=Bangalore 
Value:0xc00f07bd60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.758352652s EvaluationString:[ var='A' labels={__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=Bangalore} value=200 ], [ var='C' labels={__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=Bangalore} value=0 ]} {Instance:__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=NorthVirginia State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=NorthVirginia Value:0xc00f07be00} C:{Var:C Labels:__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=NorthVirginia Value:0xc00f07be50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.758369883s EvaluationString:[ var='A' labels={__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=NorthVirginia} value=200 ], [ var='C' labels={__name__=probe_http_status_code, config_version=1715334557691530240, instance=https://servicexchange.thomsonreuters.com/, job=SX-TR Prod UI, probe=NorthVirginia} value=0 ]}]" duration=9.696088ms + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node22 - 34.0.1.175, job=node-exporter, metrics_node_id=23, node_id=22" t=2024-05-29T13:44:13.758800465Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=erb-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.123.166:9090, job=prometheus-k8s, k8s_cluster=erb-01, kubernetes_cluster=erb-01, namespace=monitoring, pod=prom-agent-k8s-0, prometheus=monitoring/k8s, prometheus_shard=erb-01-0, service=prometheus-k8s" t=2024-05-29T13:44:13.758674137Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:13.758644526Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="service=prometheus-k8s" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo5l4mn6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758632331Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo2hcvtw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75858396Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=cistable-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.22.8:9090, job=prometheus-k8s, k8s_cluster=cistable-01, kubernetes_cluster=cistable-01, namespace=monitoring, pod=prometheus-k8s-0, prometheus=monitoring/k8s, prometheus_shard=cistable-01-0, service=prometheus-k8s" t=2024-05-29T13:44:13.758578861Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.758589461Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2" + logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:13.758535252Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service=prometheus-k8s" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.128.216, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:13.758484942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node20 - 35.208.128.216, job=node-exporter, metrics_node_id=21, node_id=20" t=2024-05-29T13:44:13.758472345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.75844653Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet2" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo2hcvtw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758411999Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.758369943Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo04wmfu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758300157Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:13.758279117Z level=debug msg="State manager processing evaluation results" resultCount=6 + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node19 - 35.216.15.158, job=node-exporter, metrics_node_id=20, node_id=19" t=2024-05-29T13:44:13.758292385Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.758267611Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo04wmfu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758208526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lo04wmfu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758194866Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnwgjnlu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758124846Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.758156406Z caller=remote_instance_store.go:51 user=838012 slug=lepton 
msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.758034505Z caller=remote_instance_store.go:51 user=407315 slug=ppcp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnwgjnlu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.758079905Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=407315 slug=ppcp instance="DBInstanceIdentifier=metabase-instance-1" t=2024-05-29T13:44:13.757928788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=407315 slug=ppcp instance="DBInstanceIdentifier=clipboardcluster-worker1" t=2024-05-29T13:44:13.757867682Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node17 - 35.215.245.35, job=node-exporter, metrics_node_id=18, node_id=17" t=2024-05-29T13:44:13.758019558Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=316960 slug=mojamteam t=2024-05-29T13:44:13.757965543Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=316960 slug=mojamteam instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.757909395Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=316960 slug=mojamteam instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.757896015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node16 - 35.214.36.62, job=node-exporter, metrics_node_id=17, node_id=16" t=2024-05-29T13:44:13.757833542Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=407315 slug=ppcp version=5 fingerprint=e4ef6e41e9433879 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.757550266Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=binderreadonly State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=binderreadonly Value:0xc07eb9fa58} C:{Var:C Labels:DBInstanceIdentifier=binderreadonly Value:0xc07eb9fa80} D:{Var:D Labels:DBInstanceIdentifier=binderreadonly Value:0xc07eb9fa50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.757040809s EvaluationString:[ var='B' labels={DBInstanceIdentifier=binderreadonly} value=2.5482440704e+10 ], [ var='C' labels={DBInstanceIdentifier=binderreadonly} value=0 ], [ var='D' labels={DBInstanceIdentifier=binderreadonly} value=25.482440704000002 ]} {Instance:DBInstanceIdentifier=clipboardcluster-worker1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=clipboardcluster-worker1 Value:0xc07eb9fa98} C:{Var:C Labels:DBInstanceIdentifier=clipboardcluster-worker1 Value:0xc07eb9faa0} D:{Var:D Labels:DBInstanceIdentifier=clipboardcluster-worker1 Value:0xc07eb9fa90}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.75705871s EvaluationString:[ var='B' labels={DBInstanceIdentifier=clipboardcluster-worker1} value=1.8625777664e+10 ], [ var='C' labels={DBInstanceIdentifier=clipboardcluster-worker1} value=0 ], [ var='D' labels={DBInstanceIdentifier=clipboardcluster-worker1} value=18.625777664 ]} {Instance:DBInstanceIdentifier=metabase-instance-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=metabase-instance-1 Value:0xc07eb9fab8} C:{Var:C Labels:DBInstanceIdentifier=metabase-instance-1 Value:0xc07eb9fac0} D:{Var:D Labels:DBInstanceIdentifier=metabase-instance-1 Value:0xc07eb9fab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.757068332s EvaluationString:[ var='B' labels={DBInstanceIdentifier=metabase-instance-1} value=6.92279296e+09 ], [ var='C' labels={DBInstanceIdentifier=metabase-instance-1} value=0 ], [ var='D' labels={DBInstanceIdentifier=metabase-instance-1} value=6.92279296 ]}]" duration=50.873101ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnsqkmxs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757702351Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node15 - 35.213.254.208, job=node-exporter, metrics_node_id=16, node_id=15" t=2024-05-29T13:44:13.757698538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnsqkmxs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757634721Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnsqkmxs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7575579Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnn1gpwf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757504629Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.757284321Z caller=remote_instance_store.go:51 user=490454 
slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnetp7c5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757293387Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=83647 slug=bidsolutions instance="datasource_uid=000000023, ref_id=A,B" t=2024-05-29T13:44:13.757271982Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lnetp7c5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757221396Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=83647 slug=bidsolutions version=1 fingerprint=0fc3bf1fdd806430 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.757177653Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000023, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.756712697s EvaluationString:}]" duration=71.963421ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ln7djmox-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757113195Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ln7djmox-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757085505Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:13.757065416Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.455528ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ln7djmox-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757032754Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ln7djmox-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.757000874Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.757052874Z caller=remote_instance_store.go:51 user=554491 slug=safeskyindustries msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.757087396Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet2" + level=debug ts=2024-05-29T13:44:13.75692781Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.75694023Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node11 - 35.215.159.121, job=node-exporter, metrics_node_id=12, node_id=11" t=2024-05-29T13:44:13.75694557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lmybzh85-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.756438348Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lmybzh85-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.756371148Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lmp5qlj0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.756263857Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lmh8c0bo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755863012Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lmc487vg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755793032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lmc487vg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755727731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lmc487vg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75565451Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lm5kswtz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75558836Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lm5kswtz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755558309Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node10 - 35.213.99.36, job=node-exporter, metrics_node_id=11, node_id=10" t=2024-05-29T13:44:13.756764401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lm5kswtz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755515949Z level=debug msg="Keeping state" state=Normal + level=error ts=2024-05-29T13:44:13.756381982Z caller=remote_rule_evaluator.go:110 user=277970 
slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'Hoist Drum Bearing - Gearbox Temp': data source not found" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llxv1sxq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755087814Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llxv1sxq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755054364Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llxv1sxq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.755013864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node09 - 35.214.241.250, job=node-exporter, metrics_node_id=10, node_id=9" t=2024-05-29T13:44:13.756619488Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llxp0ghn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754864922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llxp0ghn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754826472Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llxp0ghn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754710661Z level=debug msg="Keeping state" state=Normal 
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llxp0ghn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75468383Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llvneaqs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75461434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llvneaqs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754573599Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llvneaqs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754505139Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.756525043Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=3f96252dcb0ef338 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.756415526Z level=error msg="Failed to evaluate rule" error="failed to build query 'Hoist Drum Bearing - Gearbox Temp': data source not found" duration=7.157678ms + logger=ngalert.state.manager user=335419 slug=tbauctions t=2024-05-29T13:44:13.756507926Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.756492853Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet2" + logger=ngalert.scheduler user=335419 slug=tbauctions version=57 fingerprint=ec5f40c1e782ea41 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.756389118Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.756024554s EvaluationString:}]" duration=26.144389ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lltjvttn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754268236Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lltjvttn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754254176Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llt2d0k2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.754213726Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node06 - 35.210.253.145, job=node-exporter, metrics_node_id=7, node_id=6" t=2024-05-29T13:44:13.75619722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.756032846Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=217.407699ms + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet2, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 35.215.8.212, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:13.756031108Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.755853036Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet2" + level=debug ts=2024-05-29T13:44:13.755724418Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.75559836Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.755507497Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.954099ms + level=debug ts=2024-05-29T13:44:13.75511477Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.754964105Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node05 - 35.217.108.203, job=node-exporter, metrics_node_id=6, node_id=5" t=2024-05-29T13:44:13.754914918Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction instance="__name__=node_sysctl_net_ipv4_tcp_rmem_max, environment=engnet1, instance=localhost:9100, instance_type=hedera-node, inventory_name=node04 - 35.215.75.175, job=node-exporter, metrics_node_id=5, node_id=4" t=2024-05-29T13:44:13.754780713Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.754757045Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="environment=engnet1" + logger=ngalert.state.manager user=371756 slug=asapp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.754588534Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info ts=2024-05-29T13:44:13.754493231Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=abc884fb-6956-4678-8b6a-ac17e39c0433 alerts=1 + logger=ngalert.state.manager user=439643 slug=swirldslabspreproduction t=2024-05-29T13:44:13.754453447Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="environment=engnet1" + logger=ngalert.scheduler user=371756 slug=asapp version=31 fingerprint=f0a9aabfc993bf98 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.754239637Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.753961908s EvaluationString:}]" duration=36.461796ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.754200123Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.router03-02.router03-02 A" t=2024-05-29T13:44:13.754076282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.router03-01.router03-01 A" t=2024-05-29T13:44:13.754044166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llsi9sk6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753825922Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llshs56k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753785141Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.753921057Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:13.753901156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.postfix.ip-10-72-35-179 A" t=2024-05-29T13:44:13.753974525Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llshs56k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75364084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llpga7kl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753582119Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.port03-01.port03-01 A" t=2024-05-29T13:44:13.75394815Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.75384832Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llpga7kl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753567579Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llpga7kl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753495448Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llpga7kl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753437308Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.panapi03-02.panapi03-02 A" t=2024-05-29T13:44:13.753888773Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llo3x765-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753372887Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llo3x765-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753298536Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llo3x765-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753260816Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llo3x765-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753246176Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.panapi03-01.panapi03-01 A" t=2024-05-29T13:44:13.753846439Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llmxsapl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753177035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llmxsapl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753163975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.newrelic-private-location.ip-10-72-37-203 A" t=2024-05-29T13:44:13.753824652Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llmxsapl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.753126984Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.newrelic-private-location.ip-10-72-37-203 A" t=2024-05-29T13:44:13.753814941Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.matchmaker03-02.matchmaker03-02 A" t=2024-05-29T13:44:13.753791176Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.matchmaker03-01.matchmaker03-01 A" t=2024-05-29T13:44:13.753765906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-llccf8uo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752974723Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.ints03-02.ints03-02 A" t=2024-05-29T13:44:13.753609995Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.753559252Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ll9rl4pt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752826941Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ll9rl4pt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752813561Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.753516926Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ll9rl4pt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.75272731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.dpi03-02.dpi03-02 A" t=2024-05-29T13:44:13.753523741Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ll8mxx7j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752550198Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ll6rwl8v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752437537Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.dpi03-01.dpi03-01 A" t=2024-05-29T13:44:13.753493095Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.753406972Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.carbon-relay-ng.ip-10-72-40-34 A" t=2024-05-29T13:44:13.75345963Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="name=pre-bf.carbon-relay-ng.ip-10-72-39-19 A" t=2024-05-29T13:44:13.753425543Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ll6mpns6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752247065Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ll6mpns6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752195165Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lky3f1al-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752145954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lky3f1al-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.752039403Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:13.753350664Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.335572ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lkmzxtc6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751971333Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lkmzxtc6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.751930442Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=112387 slug=lucidhq version=15 fingerprint=a0f80706cc8b7575 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.752923392Z level=debug msg="Alert rule evaluated" results="[{Instance:name=pre-bf.apis03-02.apis03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.apis03-02.apis03-02 A Value:0xc045ebe520} C:{Var:C Labels:name=pre-bf.apis03-02.apis03-02 A Value:0xc045ebe510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751643848s EvaluationString:[ var='B' labels={name=pre-bf.apis03-02.apis03-02 A} value=2.8222331602149127 ], [ var='C' labels={name=pre-bf.apis03-02.apis03-02 A} value=0 ]} {Instance:name=pre-bf.carbon-relay-ng.ip-10-72-39-19 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.carbon-relay-ng.ip-10-72-39-19 A Value:0xc045ebe540} C:{Var:C Labels:name=pre-bf.carbon-relay-ng.ip-10-72-39-19 A Value:0xc045ebe550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751667373s EvaluationString:[ var='B' labels={name=pre-bf.carbon-relay-ng.ip-10-72-39-19 A} value=1.3919081135603617 ], [ var='C' labels={name=pre-bf.carbon-relay-ng.ip-10-72-39-19 A} value=0 ]} {Instance:name=pre-bf.carbon-relay-ng.ip-10-72-40-34 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.carbon-relay-ng.ip-10-72-40-34 A Value:0xc045ebe580} C:{Var:C Labels:name=pre-bf.carbon-relay-ng.ip-10-72-40-34 A Value:0xc045ebe570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751673186s EvaluationString:[ var='B' labels={name=pre-bf.carbon-relay-ng.ip-10-72-40-34 A} value=1.3729239820501653 ], [ var='C' labels={name=pre-bf.carbon-relay-ng.ip-10-72-40-34 A} value=0 ]} {Instance:name=pre-bf.dpi03-01.dpi03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.dpi03-01.dpi03-01 A Value:0xc045ebe5a0} C:{Var:C Labels:name=pre-bf.dpi03-01.dpi03-01 A Value:0xc045ebe5b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751681289s EvaluationString:[ var='B' labels={name=pre-bf.dpi03-01.dpi03-01 A} value=5.116680712170061 ], [ var='C' labels={name=pre-bf.dpi03-01.dpi03-01 A} value=0 ]} {Instance:name=pre-bf.dpi03-02.dpi03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.dpi03-02.dpi03-02 A Value:0xc045ebe5e0} C:{Var:C Labels:name=pre-bf.dpi03-02.dpi03-02 A Value:0xc045ebe5d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.75168652s EvaluationString:[ var='B' labels={name=pre-bf.dpi03-02.dpi03-02 A} value=5.118279730552804 ], [ var='C' labels={name=pre-bf.dpi03-02.dpi03-02 A} value=0 ]} {Instance:name=pre-bf.ints03-01.ints03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.ints03-01.ints03-01 A Value:0xc045ebe600} C:{Var:C Labels:name=pre-bf.ints03-01.ints03-01 A Value:0xc045ebe610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751693206s EvaluationString:[ var='B' labels={name=pre-bf.ints03-01.ints03-01 A} value=2.409273486410591 ], [ var='C' labels={name=pre-bf.ints03-01.ints03-01 A} value=0 ]} {Instance:name=pre-bf.ints03-02.ints03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.ints03-02.ints03-02 A Value:0xc045ebe630} C:{Var:C Labels:name=pre-bf.ints03-02.ints03-02 A Value:0xc045ebe640}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751699368s EvaluationString:[ var='B' labels={name=pre-bf.ints03-02.ints03-02 A} 
value=2.7428725157724543 ], [ var='C' labels={name=pre-bf.ints03-02.ints03-02 A} value=0 ]} {Instance:name=pre-bf.logstash.ip-10-72-38-229 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.logstash.ip-10-72-38-229 A Value:0xc045ebe670} C:{Var:C Labels:name=pre-bf.logstash.ip-10-72-38-229 A Value:0xc045ebe660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751704049s EvaluationString:[ var='B' labels={name=pre-bf.logstash.ip-10-72-38-229 A} value=1.3730637774296974 ], [ var='C' labels={name=pre-bf.logstash.ip-10-72-38-229 A} value=0 ]} {Instance:name=pre-bf.logstash.ip-10-72-41-22 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.logstash.ip-10-72-41-22 A Value:0xc045ebe690} C:{Var:C Labels:name=pre-bf.logstash.ip-10-72-41-22 A Value:0xc045ebe6a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751708807s EvaluationString:[ var='B' labels={name=pre-bf.logstash.ip-10-72-41-22 A} value=1.3730840648551745 ], [ var='C' labels={name=pre-bf.logstash.ip-10-72-41-22 A} value=0 ]} {Instance:name=pre-bf.lrid02-01.lrid02-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.lrid02-01.lrid02-01 A Value:0xc045ebe6d0} C:{Var:C Labels:name=pre-bf.lrid02-01.lrid02-01 A Value:0xc045ebe6c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751716172s EvaluationString:[ var='B' labels={name=pre-bf.lrid02-01.lrid02-01 A} value=3.104499089357712 ], [ var='C' labels={name=pre-bf.lrid02-01.lrid02-01 A} value=0 ]} {Instance:name=pre-bf.lrid02-02.lrid02-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.lrid02-02.lrid02-02 A Value:0xc045ebe6f0} C:{Var:C Labels:name=pre-bf.lrid02-02.lrid02-02 A Value:0xc045ebe700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751813343s EvaluationString:[ var='B' labels={name=pre-bf.lrid02-02.lrid02-02 A} value=3.0212589639695073 ], [ var='C' labels={name=pre-bf.lrid02-02.lrid02-02 A} value=0 ]} {Instance:name=pre-bf.matchmaker03-01.matchmaker03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.matchmaker03-01.matchmaker03-01 A Value:0xc045ebe720} C:{Var:C Labels:name=pre-bf.matchmaker03-01.matchmaker03-01 A Value:0xc045ebe730}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751820012s EvaluationString:[ var='B' labels={name=pre-bf.matchmaker03-01.matchmaker03-01 A} value=4.441805185911767 ], [ var='C' labels={name=pre-bf.matchmaker03-01.matchmaker03-01 A} value=0 ]} {Instance:name=pre-bf.matchmaker03-02.matchmaker03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.matchmaker03-02.matchmaker03-02 A Value:0xc045ebe760} C:{Var:C Labels:name=pre-bf.matchmaker03-02.matchmaker03-02 A Value:0xc045ebe750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751827463s EvaluationString:[ var='B' labels={name=pre-bf.matchmaker03-02.matchmaker03-02 A} value=4.617239734691357 ], [ var='C' labels={name=pre-bf.matchmaker03-02.matchmaker03-02 A} value=0 ]} {Instance:name=pre-bf.newrelic-private-location.ip-10-72-37-203 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.newrelic-private-location.ip-10-72-37-203 A Value:0xc045ebe780} C:{Var:C Labels:name=pre-bf.newrelic-private-location.ip-10-72-37-203 A Value:0xc045ebe790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751833591s EvaluationString:[ var='B' labels={name=pre-bf.newrelic-private-location.ip-10-72-37-203 A} value=3.3560550086179433 ], [ var='C' 
labels={name=pre-bf.newrelic-private-location.ip-10-72-37-203 A} value=0 ]} {Instance:name=pre-bf.panapi03-01.panapi03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.panapi03-01.panapi03-01 A Value:0xc045ebe7b0} C:{Var:C Labels:name=pre-bf.panapi03-01.panapi03-01 A Value:0xc045ebe7c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.75183931s EvaluationString:[ var='B' labels={name=pre-bf.panapi03-01.panapi03-01 A} value=4.065596039820466 ], [ var='C' labels={name=pre-bf.panapi03-01.panapi03-01 A} value=0 ]} {Instance:name=pre-bf.panapi03-02.panapi03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.panapi03-02.panapi03-02 A Value:0xc045ebe7e0} C:{Var:C Labels:name=pre-bf.panapi03-02.panapi03-02 A Value:0xc045ebe7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751845006s EvaluationString:[ var='B' labels={name=pre-bf.panapi03-02.panapi03-02 A} value=3.8790366497906823 ], [ var='C' labels={name=pre-bf.panapi03-02.panapi03-02 A} value=0 ]} {Instance:name=pre-bf.port03-01.port03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.port03-01.port03-01 A Value:0xc045ebe810} C:{Var:C Labels:name=pre-bf.port03-01.port03-01 A Value:0xc045ebe820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751852099s EvaluationString:[ var='B' labels={name=pre-bf.port03-01.port03-01 A} value=5.487932537483975 ], [ var='C' labels={name=pre-bf.port03-01.port03-01 A} value=0 ]} {Instance:name=pre-bf.postfix.ip-10-72-35-179 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.postfix.ip-10-72-35-179 A Value:0xc045ebe840} C:{Var:C Labels:name=pre-bf.postfix.ip-10-72-35-179 A Value:0xc045ebe850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751861851s EvaluationString:[ var='B' labels={name=pre-bf.postfix.ip-10-72-35-179 A} value=0.8733098409138524 ], [ var='C' labels={name=pre-bf.postfix.ip-10-72-35-179 A} value=0 ]} {Instance:name=pre-bf.postfix.ip-10-72-39-117 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.postfix.ip-10-72-39-117 A Value:0xc045ebe870} C:{Var:C Labels:name=pre-bf.postfix.ip-10-72-39-117 A Value:0xc045ebe880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751867466s EvaluationString:[ var='B' labels={name=pre-bf.postfix.ip-10-72-39-117 A} value=0.8755117330433775 ], [ var='C' labels={name=pre-bf.postfix.ip-10-72-39-117 A} value=0 ]} {Instance:name=pre-bf.router03-01.router03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.router03-01.router03-01 A Value:0xc045ebe8a0} C:{Var:C Labels:name=pre-bf.router03-01.router03-01 A Value:0xc045ebe8b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751875203s EvaluationString:[ var='B' labels={name=pre-bf.router03-01.router03-01 A} value=3.360361227737935 ], [ var='C' labels={name=pre-bf.router03-01.router03-01 A} value=0 ]} {Instance:name=pre-bf.router03-02.router03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.router03-02.router03-02 A Value:0xc045ebe8d0} C:{Var:C Labels:name=pre-bf.router03-02.router03-02 A Value:0xc045ebe8e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751881609s EvaluationString:[ var='B' labels={name=pre-bf.router03-02.router03-02 A} value=3.586270174828736 ], [ var='C' labels={name=pre-bf.router03-02.router03-02 A} value=0 ]} {Instance:name=pre-bf.supapi03-01.supapi03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:name=pre-bf.supapi03-01.supapi03-01 A Value:0xc045ebe910} C:{Var:C Labels:name=pre-bf.supapi03-01.supapi03-01 A Value:0xc045ebe900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.75188665s EvaluationString:[ var='B' labels={name=pre-bf.supapi03-01.supapi03-01 A} value=5.516684851849334 ], [ var='C' labels={name=pre-bf.supapi03-01.supapi03-01 A} value=0 ]} {Instance:name=pre-bf.supapi03-02.supapi03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.supapi03-02.supapi03-02 A Value:0xc045ebe930} C:{Var:C Labels:name=pre-bf.supapi03-02.supapi03-02 A Value:0xc045ebe940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751891553s EvaluationString:[ var='B' labels={name=pre-bf.supapi03-02.supapi03-02 A} value=5.589543510748644 ], [ var='C' labels={name=pre-bf.supapi03-02.supapi03-02 A} value=0 ]} {Instance:name=pre-bf.survey03-01.survey03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.survey03-01.survey03-01 A Value:0xc045ebe960} C:{Var:C Labels:name=pre-bf.survey03-01.survey03-01 A Value:0xc045ebe970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751897643s EvaluationString:[ var='B' labels={name=pre-bf.survey03-01.survey03-01 A} value=4.209258277514612 ], [ var='C' labels={name=pre-bf.survey03-01.survey03-01 A} value=0 ]} {Instance:name=pre-bf.survey03-02.survey03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.survey03-02.survey03-02 A Value:0xc045ebe990} C:{Var:C Labels:name=pre-bf.survey03-02.survey03-02 A Value:0xc045ebe9a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751905959s EvaluationString:[ var='B' labels={name=pre-bf.survey03-02.survey03-02 A} value=3.6877686472417976 ], [ var='C' labels={name=pre-bf.survey03-02.survey03-02 A} value=0 ]} {Instance:name=pre-bf.utils03-01.utils03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.utils03-01.utils03-01 A Value:0xc045ebe9c0} C:{Var:C Labels:name=pre-bf.utils03-01.utils03-01 A Value:0xc045ebe9d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751912194s EvaluationString:[ var='B' labels={name=pre-bf.utils03-01.utils03-01 A} value=4.521931747507702 ], [ var='C' labels={name=pre-bf.utils03-01.utils03-01 A} value=0 ]} {Instance:name=pre-bf.utils03-02.utils03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.utils03-02.utils03-02 A Value:0xc045ebe9f0} C:{Var:C Labels:name=pre-bf.utils03-02.utils03-02 A Value:0xc045ebea00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751917797s EvaluationString:[ var='B' labels={name=pre-bf.utils03-02.utils03-02 A} value=5.580924347742473 ], [ var='C' labels={name=pre-bf.utils03-02.utils03-02 A} value=0 ]} {Instance:name=pre-bf.web03-01.web03-01 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.web03-01.web03-01 A Value:0xc045ebea30} C:{Var:C Labels:name=pre-bf.web03-01.web03-01 A Value:0xc045ebea20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751923311s EvaluationString:[ var='B' labels={name=pre-bf.web03-01.web03-01 A} value=3.1741361665306504 ], [ var='C' labels={name=pre-bf.web03-01.web03-01 A} value=0 ]} {Instance:name=pre-bf.web03-02.web03-02 A State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:name=pre-bf.web03-02.web03-02 A Value:0xc045ebea60} C:{Var:C Labels:name=pre-bf.web03-02.web03-02 A Value:0xc045ebea50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751928649s 
EvaluationString:[ var='B' labels={name=pre-bf.web03-02.web03-02 A} value=3.3305567216812575 ], [ var='C' labels={name=pre-bf.web03-02.web03-02 A} value=0 ]}]" duration=1.964224024s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lkjubjkl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751799811Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lkbjilit-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751531758Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.75320527Z caller=client.go:80 msg="creating client for grafana instance" user=346616 addr=dns:///tikt-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lkb8gpqy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751409697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lkb8gpqy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751346896Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lk43emu8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751299776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=713314 slug=tpceunonprod t=2024-05-29T13:44:13.753078039Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=19.852038ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lk09c9lo-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751110604Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.752884486Z caller=remote_instance_store.go:51 user=691059 slug=deluxeconfstg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=691059 slug=deluxeconfstg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.752800355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=691059 slug=deluxeconfstg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.752761914Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=691059 slug=deluxeconfstg version=1 fingerprint=155a7263af2d5eff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.752678123Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.752526933s EvaluationString:}]" duration=7.011969ms + level=info component=discovery ts=2024-05-29T13:44:13.75260734Z caller=client.go:80 msg="creating client for grafana instance" user=660019 addr=dns:///thierryorvoen-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.752558364Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.752406615Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.752283731Z caller=client.go:80 msg="creating client for grafana instance" user=306290 addr=dns:///theclinician-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.752202781Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=446750 slug=streamshark + level=debug ts=2024-05-29T13:44:13.752042386Z caller=remote_instance_store.go:51 user=228733 slug=csmoney msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.752009655Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.751963974Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=228733 slug=csmoney version=169 fingerprint=8d02eff062aa4366 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.751702247Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.751337401s EvaluationString:}]" duration=57.010648ms + level=debug ts=2024-05-29T13:44:13.75180779Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.751615849Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.751691904Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.751648456Z caller=remote_instance_store.go:51 user=460915 slug=funrise msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.751665196Z 
caller=ruler.go:522 msg="tenant is owned by this instance" user=393676 slug=snatesa1 groups=0 + level=debug ts=2024-05-29T13:44:13.751578085Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=318831 slug=grafanavianet t=2024-05-29T13:44:13.751555343Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.751484365Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.751306424Z caller=ruler.go:522 msg="tenant is owned by this instance" user=326055 slug=syndim groups=0 + level=info ts=2024-05-29T13:44:13.751324585Z caller=remote_alert_sender.go:94 user=633381 slug=arascorp host=arascorp-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.229.151:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddiw3mej58ruod alerts=1 + logger=ngalert.state.manager.persist user=633381 slug=arascorp t=2024-05-29T13:44:13.75124092Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.300934ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lk09c9lo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751072703Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lk09c9lo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.751055233Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.750790321Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljy7rjqf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750675759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.750683901Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.282061ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.750643231Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.60736ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljvq9pu6-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750607229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljvq9pu6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750471917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljo4bo8r-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750403486Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.750340952Z caller=ruler.go:522 msg="tenant is owned by this instance" user=656463 slug=santiagodiaz1 groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljo4bo8r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750360406Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljo4bo8r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750332586Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljnygrvh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750215955Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljnygrvh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750188364Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljnygrvh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.750075893Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljjsh1uw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.749974052Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.749948611Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.684571ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ljjsh1uw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.749863781Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.749868416Z caller=client.go:80 msg="creating client for grafana instance" user=667213 addr=dns:///tastybites-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=612525 slug=adleyeview t=2024-05-29T13:44:13.749747897Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:13.749664627Z caller=remote_image_capturer.go:61 user=127813 slug=clearsale rule_org_id=1 rule_uid=eddallt3ir1ttc dashboard=2CWpLaDVk panel=71 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager.persist user=691102 slug=deluxeconfdev t=2024-05-29T13:44:13.749532719Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.319645ms + level=info component=discovery ts=2024-05-29T13:44:13.749420192Z caller=client.go:80 msg="creating client for grafana instance" user=354245 addr=dns:///tan2-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-liihow2n-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.749356276Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-liihow2n-termination-metadata-pv, 
phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.749315745Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.749207752Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-liihow2n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.749217614Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.749181081Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ligdmqep-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.749081703Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ligdmqep-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.748964892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:13.748888006Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:13.748879841Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:13.748870069Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lie2k0ep-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.748897121Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lie2k0ep-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.74881791Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=233863 slug=rtsystems t=2024-05-29T13:44:13.748595474Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.737994ms + level=debug ts=2024-05-29T13:44:13.748592039Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.748363146Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:13.748480117Z level=debug msg="Done saving alert state history batch" + level=debug ts=2024-05-29T13:44:13.748440791Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.748205246Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li8f577s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.748209954Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li8f577s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.748166744Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li7ltty9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747998012Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li47hu0g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74785847Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li47hu0g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74784586Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li47hu0g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74778233Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li47hu0g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747742689Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li1rkkro-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747633218Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li1rkkro-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747518737Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li0e4t45-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747453986Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=328453 slug=jitolabs t=2024-05-29T13:44:13.747398724Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:13.747279885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-li0e4t45-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747306475Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:13.747290299Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.038944ms + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lhx6v8ky-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747217264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lhx6v8ky-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747205004Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lhx6v8ky-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747170133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lhx6v8ky-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747140603Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lhx6v8ky-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.747098293Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=103548 slug=gen2 instance= t=2024-05-29T13:44:13.746989214Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lhuwd7wg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.746908311Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lhe758dx-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74684961Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.746714918Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.746654039Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:13.746593877Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=548157 slug=kushkiprod version=34 fingerprint=6c761fa1829a9e22 attempt=1 now=2024-05-29T13:44:00Z t=2024-05-29T13:44:13.746431075Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddfda265-8321-4dab-9f53-1af50b9462b9, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:00 +0000 UTC EvaluationDuration:13.745865915s EvaluationString:}]" duration=8.682851178s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh960bxk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.746306334Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh960bxk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.746245564Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh8qtdn6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.746175523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh8qtdn6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.746055332Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:13.745981793Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=46.568412ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=R3cOulc4z, ref_id=A" t=2024-05-29T13:44:13.745980351Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh848vu3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74592068Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=183214 slug=vectorizedio version=1 fingerprint=9e09fecf33ce0c35 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.745877219Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=R3cOulc4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.745638014s EvaluationString:}]" duration=37.708263ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh848vu3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74585231Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.745872058Z caller=remote_instance_store.go:51 user=22398 slug=sunfolding msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=22398 slug=sunfolding t=2024-05-29T13:44:13.745749469Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh77uuqc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745708098Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh61zrea-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745493856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh61zrea-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745457716Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.745432816Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698047 slug=gamesworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.74544968Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698047 slug=gamesworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.745439559Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.745432492Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.745389182Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh43216b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745352144Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh43216b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745306704Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh43216b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745276924Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh43216b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745234493Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh3f6azm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745087502Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lh3f6azm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.745065642Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.745060114Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:13.744749924Z level=debug msg="Alert state changed creating annotation" newState="Normal (MissingSeries)" oldState=Pending + logger=ngalert.state.historian backend=loki user=516446 slug=awarehqdev t=2024-05-29T13:44:13.744725123Z level=debug msg="Alert state changed creating annotation" newState=Pending oldState=Normal + level=debug ts=2024-05-29T13:44:13.744836222Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.744812987Z caller=remote_instance_store.go:51 user=554491 slug=safeskyindustries msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgyhwarh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744817929Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.744624764Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.74467Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.744637869Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:13.744629621Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.883621ms + logger=ngalert.state.manager user=554491 slug=safeskyindustries instance="cluster=we-svc, persistentvolumeclaim=datadir-primary-0" t=2024-05-29T13:44:13.744656484Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=554491 slug=safeskyindustries instance="cluster=mon, persistentvolumeclaim=postgres-pvc" t=2024-05-29T13:44:13.744609483Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgp12gjy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744661437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=55491 slug=demandbase t=2024-05-29T13:44:13.74460548Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=54.818862ms + logger=ngalert.state.manager user=554491 slug=safeskyindustries instance="cluster=mon, persistentvolumeclaim=postgres-pvc" t=2024-05-29T13:44:13.744594183Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.744566889Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgp12gjy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744560436Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.744544291Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.744557784Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.744378986Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgp12gjy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744519196Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.744269954Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.922689ms + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=ATLANTA Query" t=2024-05-29T13:44:13.744286674Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.744380549Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgl43acr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744351514Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgl43acr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744307424Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.744332614Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.744287834Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.208.13:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=d1f512a1-15d4-4176-b977-4aa3a820423c alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgjdn1hc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744235553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgjdn1hc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744168612Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgjdn1hc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744147082Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgi6dx9s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.744063891Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.744014847Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=174675 slug=journalprod instance="datasource_uid=uF2hBHyGz, ref_id=A" t=2024-05-29T13:44:13.74409052Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=174675 slug=journalprod t=2024-05-29T13:44:13.74403425Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=174675 slug=journalprod version=1 fingerprint=37ea2b960be30af9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.743908747Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=uF2hBHyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.743581497s EvaluationString:}]" duration=21.354174ms + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.743922092Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=f901086b-e83c-4767-8689-f9c5848eaf68, ref_id=A" t=2024-05-29T13:44:13.743888895Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgfbahp1-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743869999Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:13.743866717Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.74389347Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.743835954Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.743752887Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.743747263Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgfbahp1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743773908Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgf3dpz4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743737308Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.743734152Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgf3dpz4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743681647Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=691855 slug=chainlake t=2024-05-29T13:44:13.743687419Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=691855 slug=chainlake instance="instance=compute-hel-4-cpx41-compute-hel-4, nodename=compute-hel-4" t=2024-05-29T13:44:13.743669312Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.743621948Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.743574902Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:13.743562021Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=f46d8ce9b30bbff5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.743522254Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.743310496s EvaluationString:}]" duration=203.858767ms + logger=ngalert.state.manager user=691855 slug=chainlake t=2024-05-29T13:44:13.743476894Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgf1ojfa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743403244Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.743414319Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.743378044Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lges3yky-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743272633Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.743261835Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lges3yky-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743243723Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=228733 slug=csmoney t=2024-05-29T13:44:13.743202904Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=228733 slug=csmoney instance="datasource_uid=stWgXyV7z, ref_id=D" t=2024-05-29T13:44:13.743185691Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.743244067Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lges3yky-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743171652Z level=debug msg="Setting 
next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.743157291Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=228733 slug=csmoney version=167 fingerprint=e77695c0568910c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.74298014Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=stWgXyV7z, ref_id=D State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.742542124s EvaluationString:}]" duration=191.610911ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lges3yky-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743104641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lgcgbu6s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.743041811Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.743084934Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=307001 slug=hirerightdev t=2024-05-29T13:44:13.743028902Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.272119ms + level=debug ts=2024-05-29T13:44:13.742974423Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=679831 slug=joveostageaws t=2024-05-29T13:44:13.743012532Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=info component=discovery ts=2024-05-29T13:44:13.7428858Z caller=client.go:80 msg="creating client for grafana instance" user=457194 addr=dns:///szhangau-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.742863505Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=357276 slug=simplyenergy + level=debug ts=2024-05-29T13:44:13.742830722Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:13.742628665Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.934558ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfxounh4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.742573466Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:13.742581174Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.742525103Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfxounh4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.742535965Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.742540782Z caller=client.go:80 msg="creating client for grafana instance" user=326055 addr=dns:///syndim-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.742474517Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfxounh4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.742441014Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfxczchs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.742281253Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.742116556Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.742127692Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=647401 slug=riodiep + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfujmtqs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.742146251Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.742035981Z caller=remote_instance_store.go:51 user=713314 slug=tpceunonprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.742039518Z caller=remote_instance_store.go:51 user=355429 slug=zenpli msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-lfujmtqs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74203705Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfqme3no-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741937159Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfqme3no-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741911139Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.741741975Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfqme3no-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741870238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.74177304Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.741725007Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.741703467Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.741699965Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lfndpkmw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741710967Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=543660 slug=jobcloudprogrammaticstage version=1 fingerprint=9710dd7bcd2ce01b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.741577571Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData 
Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.741176662s EvaluationString:}]" duration=11.178546ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lf6ascb2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741591176Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lf6ascb2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741549135Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.741466314Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lf2rua31-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741452154Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.741498615Z caller=remote_instance_store.go:51 user=328971 slug=wefight msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lf2rua31-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741425724Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=328971 slug=wefight instance= t=2024-05-29T13:44:13.741397168Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:13.741338996Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lf2hbnlc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741253762Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lf2hbnlc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.741177631Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.741086196Z caller=remote_instance_store.go:51 user=374423 slug=bitburst msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=374423 slug=bitburst instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.740981634Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.scheduler user=374423 slug=bitburst version=74 fingerprint=3818e8b3fddaf900 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.740759894Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.74040843s EvaluationString:}]" duration=23.08514ms
+ logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="__name__=jvm_thread_count, cluster=gke-jill-p-us-west1-1, job=jill/jillprdlivets-app, jvm_thread_daemon=false" t=2024-05-29T13:44:13.740706064Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="__name__=jvm_thread_count, cluster=gke-jill-p-us-west1-1, job=jill/jillprdlivesearch-app-slave, jvm_thread_daemon=false" t=2024-05-29T13:44:13.740670643Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.740711857Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="__name__=jvm_thread_count, cluster=gke-jill-p-us-west1-1, job=jill/jillprdauthxc-app, jvm_thread_daemon=true" t=2024-05-29T13:44:13.740559751Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:13.740595052Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=470314 slug=sba3
+ level=info component=discovery ts=2024-05-29T13:44:13.740632118Z caller=client.go:80 msg="creating client for grafana instance" user=446750 addr=dns:///streamshark-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.740585496Z caller=ruler.go:522 msg="tenant is owned by this instance" user=316890 slug=satishsreenivasan groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leyamsop-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.740649096Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leyamsop-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.740626026Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="__name__=jvm_thread_count, cluster=gke-jill-p-us-west1-1, job=jill/jillprdauthts-app, jvm_thread_daemon=true" t=2024-05-29T13:44:13.740508289Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:53:10Z next_ends_at=2024-05-29T13:56:10Z
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leyamsop-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.740597795Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=916144 slug=cmjjilpd instance="__name__=jvm_thread_count, cluster=gke-jill-p-us-west1-1, job=jill/jillprdauthts-app, jvm_thread_daemon=true" t=2024-05-29T13:44:13.740499829Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=916144 slug=cmjjilpd t=2024-05-29T13:44:13.740340786Z level=debug msg="State manager processing evaluation results" resultCount=18
+ level=debug ts=2024-05-29T13:44:13.740415488Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.740393877Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.740381856Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.740352897Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leq7zgd3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.740355443Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leq7zgd3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.740302932Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leq7zgd3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.740261072Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.740244558Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.740090401Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leowbq0h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.74008981Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leltk04u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739933979Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.739938225Z caller=ruler.go:522 msg="tenant is owned by this instance" user=547513 slug=r88 groups=0
+ level=debug ts=2024-05-29T13:44:13.739804197Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.739846266Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leim33s6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739854968Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-leim33s6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739826718Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lehihbp8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739732947Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lehihbp8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739711256Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lehihbp8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739624055Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-le5w6yot-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739492594Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-le5w6yot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739450834Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-le4koiui-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739348953Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=689030 slug=simonsuat instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.739305004Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-le4koiui-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739306252Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ldz86h48-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739271902Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ldz86h48-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739243372Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=691102 slug=deluxeconfdev t=2024-05-29T13:44:13.739210363Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.739261904Z caller=remote_instance_store.go:51 user=691102 slug=deluxeconfdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=691102 slug=deluxeconfdev t=2024-05-29T13:44:13.739160891Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ldz86h48-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.739140641Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.738868277Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=633381 slug=arascorp instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.738920784Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:13.738887628Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ldxdbxb1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.738906878Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=633381 slug=arascorp t=2024-05-29T13:44:13.738844691Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.738746055Z caller=remote_instance_store.go:51 user=715709 slug=mtbprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=715709 slug=mtbprod t=2024-05-29T13:44:13.738690874Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.73827497Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ld8n84rl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73812453Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ld8n84rl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73810298Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ld8n84rl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73807166Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ld8n84rl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.738047249Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.737828766Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.737799603Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ld8h0u40-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737822577Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ld6ld3jt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737684896Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=698103 slug=vericast t=2024-05-29T13:44:13.737689245Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.737663627Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.737581457Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lcvlfl40-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737563274Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lcvlfl40-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737490874Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lcvlfl40-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737418463Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lcchb931-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737309862Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:13.737246893Z caller=remote_alert_sender.go:94 user=548157 slug=kushkiprod host=kushkiprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.209.249:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=db21ee33-ca9f-4306-919d-466628ce423b alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lcchb931-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737234501Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc8phhvv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.737199451Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:13.737155549Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.766626ms
+ level=debug ts=2024-05-29T13:44:13.737186597Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc8phhvv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73709805Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.737020062Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc7leu3b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736875367Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc7leu3b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736811497Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc6g9f4v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736773176Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc6g9f4v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736720386Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.73658772Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:13.736532241Z caller=client.go:80 msg="creating client for grafana instance" user=293244 addr=dns:///spyn-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc2audti-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736600104Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc2audti-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736547574Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.736585083Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.736530876Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.736568412Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.736538213Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc2audti-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736500133Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lc0unnk7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736375052Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.736297256Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.736225879Z caller=remote_instance_store.go:51 user=407477 slug=inventa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbzt9en0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73619931Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbzt9en0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7361728Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbux4wjf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.736059849Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.73612395Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:13.736071612Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbux4wjf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735984498Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbux4wjf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735940048Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbreo02s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735876637Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbreo02s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735780276Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID2762dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.73577054Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=103.089684ms
+ level=warn ts=2024-05-29T13:44:13.735744253Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=345253 slug=r3x
+ level=debug ts=2024-05-29T13:44:13.735548674Z caller=ruler.go:522 msg="tenant is owned by this instance" user=649071 slug=radicalsync groups=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbkyj60q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735409612Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbkyj60q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735367372Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.735397616Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbkyj60q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735339052Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.735295194Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance"
+ Error parsing panelUID for alert annotationruleID2081dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=432323 slug=lithic version=25 fingerprint=c190fec765a98a84 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.735137173Z level=debug msg="Alert rule evaluated" results="[{Instance:CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001 Value:0xc02640ab28} C:{Var:C Labels:CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001 Value:0xc02640ab20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.734773428s EvaluationString:[ var='B' labels={CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001} value=1.3333555559259322 ], [ var='C' labels={CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001} value=0 ]}]" duration=54.364512ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbkf48l5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73522456Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbkf48l5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73516562Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbh0g80x-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.735056769Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.734993146Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbh0g80x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734996668Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbe7by38-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734854967Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbe7by38-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734825966Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbajr234-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734660545Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lbajr234-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734619334Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.734515275Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.734491453Z caller=remote_instance_store.go:51 user=677132 slug=dragonflydbdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=677132 slug=dragonflydbdev instance="database_id=attos-dev:stagingv2-control-plane-db-replica" t=2024-05-29T13:44:13.734437881Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=677132 slug=dragonflydbdev instance="database_id=attos-dev:stagingv2-control-plane-db-replica" t=2024-05-29T13:44:13.734427712Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID1020dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.734392183Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=145.199496ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lb5m3bb3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734402192Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=677132 slug=dragonflydbdev instance="database_id=attos-dev:stagingv2-control-plane-db" t=2024-05-29T13:44:13.734405851Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lb5m3bb3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734332441Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lb3xspy3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73421751Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.734338058Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lb3xspy3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734160809Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.734094623Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lb0pk92y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.734069689Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:13.734001402Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=47.4565ms
+ level=warn ts=2024-05-29T13:44:13.733933762Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=303552 slug=posmalaysiaapm
+ level=warn ts=2024-05-29T13:44:13.733917773Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=517303 slug=randmcnally
+ logger=ngalert.state.manager.persist user=274199 slug=telemetriahgm t=2024-05-29T13:44:13.733920264Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.820399ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-latgw7o5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733872627Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-latgw7o5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733840546Z level=debug msg="Setting next state" handler=resultNormal
+ Error parsing panelUID for alert annotationruleID2542dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.733810703Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=795.812531ms
+ logger=ngalert.state.manager user=849222 slug=franv2dev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.7337466Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-latgw7o5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733792856Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=233863 slug=rtsystems t=2024-05-29T13:44:13.733740206Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=849222 slug=franv2dev version=1 fingerprint=3b1060b6ef3261ba attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.733663798Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.733436626s EvaluationString:}]" duration=29.212049ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-larpc9yi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733639954Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-larpc9yi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733560803Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.733587656Z caller=remote_instance_store.go:51 user=60199 slug=wallapop msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-larpc9yi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733547803Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=60199 slug=wallapop t=2024-05-29T13:44:13.733494283Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:13.73345615Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.245415ms
+ level=debug ts=2024-05-29T13:44:13.733420311Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-laqqbh4j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733450252Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-laqqbh4j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733422762Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.733394827Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=316418 slug=workmotion version=4 fingerprint=c37abc2f4067c6dd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.733080349Z level=debug msg="Alert rule evaluated" results="[{Instance:ClientId=455456564602, DomainName=prod-workmotion-auditlog State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog Value:0xc072558998} C:{Var:C Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog Value:0xc072558948}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.732627487s EvaluationString:[ var='B' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog} value=1 ], [ var='C' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog} value=0 ]}]" duration=28.41774ms
+ logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.733232148Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster"
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Unknown, pod=cb-strapi-66cc6967fc-fhkqt, uid=9502c577-5691-43ee-a176-5d483e9ccf8f" t=2024-05-29T13:44:13.733199323Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:13.733221634Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=290943 slug=purewhite
+ level=warn ts=2024-05-29T13:44:13.732993222Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=424390 slug=quipcheck
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Unknown, pod=cb-frontend-7bcb7ccddc-s6nzx, uid=82c32131-b4bf-466c-831f-6a70ad6113c3" t=2024-05-29T13:44:13.733152868Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.733101624Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lal9ry7e-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733099729Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-lal9ry7e-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.733020208Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:13.73303135Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=667104 slug=nzmit
+ level=debug ts=2024-05-29T13:44:13.733086804Z caller=ruler.go:522 msg="tenant is owned by this instance" user=544318 slug=redphasetechdemo groups=0
+ logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.733050074Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster"
+ level=info component=discovery ts=2024-05-29T13:44:13.733025045Z caller=client.go:80 msg="creating client for grafana instance" user=613746 addr=dns:///siar-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.733010433Z caller=ruler.go:522 msg="tenant is owned by this instance" user=667104 slug=nzmit groups=0
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Unknown, pod=cb-backend-649c9f4dc7-k26b5, uid=add9f0ad-0295-4bc1-8f95-1667ea249587" t=2024-05-29T13:44:13.733014125Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.732987626Z caller=ruler.go:522 msg="tenant is owned by this instance" user=389674 slug=otsee groups=0
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Unknown, pod=cb-backend-649c9f4dc7-k26b5, uid=add9f0ad-0295-4bc1-8f95-1667ea249587" t=2024-05-29T13:44:13.732998648Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Unknown, pod=cb-backend-649c9f4dc7-cnl67, uid=56339e14-7a4b-445f-9055-4771e47a5a21" t=2024-05-29T13:44:13.732886531Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.73285487Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Succeeded, pod=cb-strapi-66cc6967fc-fhkqt, uid=9502c577-5691-43ee-a176-5d483e9ccf8f" t=2024-05-29T13:44:13.73276164Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.732808073Z caller=client.go:80 msg="creating client for grafana instance" user=542567 addr=dns:///serviannz-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:13.73278626Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=337181 slug=parentevents
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-labmbybz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.732747075Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=qa, role=ibotdr_fdb" t=2024-05-29T13:44:13.732754923Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Succeeded, pod=cb-frontend-7bcb7ccddc-9kxrs, uid=035d4b44-8f97-4cd9-998d-4500597f122d" t=2024-05-29T13:44:13.732664467Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.73270553Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Succeeded, pod=cb-backend-649c9f4dc7-k26b5, uid=add9f0ad-0295-4bc1-8f95-1667ea249587" t=2024-05-29T13:44:13.732623071Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=eqny405-pomsp02" t=2024-05-29T13:44:13.732668796Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=538037 slug=drivewealth instance="host=eqny405-pomsp02" t=2024-05-29T13:44:13.732656941Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la85onef-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.732619494Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Pending, pod=cb-strapi-66cc6967fc-lcrmb, uid=45d0da60-6c86-46b1-9f42-7b9743cf3452" t=2024-05-29T13:44:13.732531932Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.732578326Z caller=remote_instance_store.go:51 user=111653 slug=theassociationmxp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la7qtlcx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.732481332Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:13.732477168Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=111653 slug=theassociationmxp instance= t=2024-05-29T13:44:13.732457322Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Pending, pod=cb-strapi-66cc6967fc-fhkqt, uid=9502c577-5691-43ee-a176-5d483e9ccf8f" t=2024-05-29T13:44:13.732461021Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.73244302Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster"
+ level=debug ts=2024-05-29T13:44:13.732417433Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.732385396Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.732366938Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.732343592Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=696798 slug=mcv instance="datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious" t=2024-05-29T13:44:13.732330232Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Pending, pod=cb-frontend-7bcb7ccddc-9kxrs, uid=035d4b44-8f97-4cd9-998d-4500597f122d" t=2024-05-29T13:44:13.732336065Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=ac59f13d0867b5fd attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.732226581Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a7c4b457-6a7a-416c-b994-1407dd32ed34, ref_id=Query,QueryPrevious State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.731953452s EvaluationString:}]" duration=59.868748ms
+ logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Pending, pod=cb-backend-649c9f4dc7-k26b5, uid=add9f0ad-0295-4bc1-8f95-1667ea249587" t=2024-05-29T13:44:13.732297656Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.732259521Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster"
+ level=debug ts=2024-05-29T13:44:13.732150776Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.732183223Z caller=remote_alert_sender.go:94 user=319327 slug=cvi host=cvi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.159.106:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cDZsT0hVz alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la7q0p0y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.732183229Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la7q0p0y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.732136139Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:13.732190792Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.118.165:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=VcO6sRE4z alerts=1
+ logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:13.732179766Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The process count on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment has changed repeatedly within the past hour.
This may indicate flapping.': error parsing template __alert_FDB - Process Count (Flapping): template: __alert_FDB - Process Count (Flapping):1: function \"role\" not defined" + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Failed, pod=cb-strapi-66cc6967fc-lcrmb, uid=45d0da60-6c86-46b1-9f42-7b9743cf3452" t=2024-05-29T13:44:13.732185483Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la7q0p0y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.732103368Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs instance="cluster=coinbureau-eks-cluster, instance=grafana-k8s-monitoring-kube-state-metrics.grafana.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=prod-cb, phase=Failed, pod=cb-strapi-66cc6967fc-fhkqt, uid=9502c577-5691-43ee-a176-5d483e9ccf8f" t=2024-05-29T13:44:13.732111335Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.732052426Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la75llb9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731971477Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la75llb9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731894086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-la75llb9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731862276Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.731859984Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="cluster=coinbureau-eks-cluster" + logger=ngalert.state.manager user=526847 slug=soniclabs t=2024-05-29T13:44:13.731639794Z level=debug msg="State manager processing evaluation results" resultCount=24 + level=debug ts=2024-05-29T13:44:13.7317971Z caller=remote_instance_store.go:51 user=516446 slug=awarehqdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9zw57ty-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731756375Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:13.731814889Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The process count on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment has changed repeatedly within the past hour. This may indicate flapping.': error parsing template __alert_FDB - Process Count (Flapping): template: __alert_FDB - Process Count (Flapping):1: function \"role\" not defined" + level=debug ts=2024-05-29T13:44:13.731709698Z caller=remote_instance_store.go:57 user=516446 slug=awarehqdev msg="calling DeleteAlertInstances - not implemented" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9zw57ty-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731723104Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:13.731684797Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=516446 slug=awarehqdev t=2024-05-29T13:44:13.731672297Z level=info msg="Detected stale state entry" cacheID="[[\"EndpointName\",\"v3-sentiment-esp\"],[\"Series\",\"query63efe21f53724af4b916c1abd0314ea7\"],[\"__alert_rule_namespace_uid__\",\"D-8RyMx4z\"],[\"__alert_rule_uid__\",\"vYufSfxVkm\"],[\"alertname\",\"v3-sentiment-esp-no-invocations\"],[\"grafana_folder\",\"bi\"],[\"group\",\"SageMakerNoInvocations\"],[\"route\",\"team=bi\"],[\"team\",\"bi\"]]" state=Pending reason= + logger=ngalert.state.manager user=516446 slug=awarehqdev instance="EndpointName=v3-sentiment-esp, Series=query8cf5b60e18e2424285f80195d14c662f" t=2024-05-29T13:44:13.731649897Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.73157493Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9zw57ty-termination-metadata-pv, 
phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731676724Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9rs3s3o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731593883Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=516446 slug=awarehqdev t=2024-05-29T13:44:13.731494693Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.731437099Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9qluwn5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73132015Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9qluwn5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.73128607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9qluwn5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.731221249Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.731143133Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=716630 slug=coapdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.730908021Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9jmoo3m-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.730936536Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.730990406Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager.persist user=54972 slug=zanglang t=2024-05-29T13:44:13.730861369Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=53.71127ms + level=debug ts=2024-05-29T13:44:13.730843152Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.730783312Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=55.428442ms + level=debug ts=2024-05-29T13:44:13.730782906Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.730802715Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9jmoo3m-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.730707424Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:13.730690837Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance= t=2024-05-29T13:44:13.730674716Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.730715572Z caller=remote_instance_store.go:51 user=662362 slug=imsafu msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance= t=2024-05-29T13:44:13.730660006Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.73072092Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=435206 slug=kkrprivateuat t=2024-05-29T13:44:13.730603305Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager.persist user=662362 slug=imsafu t=2024-05-29T13:44:13.7306657Z level=debug msg="Saving alert states" count=5 max_state_save_concurrency=1 + logger=ngalert.scheduler user=245291 slug=pismo version=165 fingerprint=767f1a167843962a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.730599046Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.73039006s EvaluationString:}]" duration=87.00804ms + logger=ngalert.state.manager user=662362 slug=imsafu instance="__name__=allowpay_go_stuck_call_count, app=allowpay-api-mainnet, chain_id=unknown, host=1dad, instance=6e82d924a0e218, region=lhr" t=2024-05-29T13:44:13.730652251Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9hf2jyd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.730528002Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l9h3crhq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.730472232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l97mqjrc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.730230569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l97mqjrc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.730119648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=662362 slug=imsafu t=2024-05-29T13:44:13.730027407Z level=debug msg="State manager processing evaluation results" resultCount=5 + level=warn ts=2024-05-29T13:44:13.73004235Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=503035 slug=purpleflag + level=debug ts=2024-05-29T13:44:13.73001223Z caller=ruler.go:522 msg="tenant is owned by this instance" user=503035 slug=purpleflag groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l976d4c9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729933686Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.729865283Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.729787052Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l93vt18k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729777035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l93vt18k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729740094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l93vt18k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729726094Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8zzs6oc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729679214Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8zzs6oc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729647033Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=146728 slug=dgc instance="datasource_uid=WqVnnZtMk, ref_id=A" t=2024-05-29T13:44:13.72971723Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=146728 slug=dgc instance="datasource_uid=WqVnnZtMk, ref_id=A" t=2024-05-29T13:44:13.729705221Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=146728 slug=dgc t=2024-05-29T13:44:13.729667444Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.729661373Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.729536208Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.729523832Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8yxobe5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729459281Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.729211061Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8xi4dqq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729128318Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.729149017Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.729058045Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8oybw34-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.729014577Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:13.728964726Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:13.728955481Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.728896279Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8nid4ug-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728822805Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8nid4ug-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728780904Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8nid4ug-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728719364Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-l8nid4ug-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728692743Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8jkrpes-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728626793Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.728650344Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.728627603Z caller=remote_alert_sender.go:94 user=120621 slug=jdall host=jdall-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.152.99.132:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=1KOGnKlnk alerts=1 + logger=ngalert.state.manager.persist user=120621 slug=jdall t=2024-05-29T13:44:13.728566709Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.800404ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8jkrpes-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728510872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8hby8py-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728427361Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.72843807Z caller=remote_instance_store.go:51 user=277970 slug=teckresourcestest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8hby8py-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728284059Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:13.728367933Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + level=debug ts=2024-05-29T13:44:13.728278504Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8fgahh2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728243069Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=da0b66ca23d9240d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.728240652Z level=error msg="Failed to evaluate rule" error="failed to build query 'C': data source not found" duration=5.128026ms + level=error ts=2024-05-29T13:44:13.728207794Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'C': data source not found" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8fgahh2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728165138Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8fgahh2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728134848Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.728074554Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.728066913Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.160229ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8fgahh2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728091237Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.728088718Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.728082019Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l8fgahh2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.728076987Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=471517 slug=trist85 t=2024-05-29T13:44:13.728065216Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.728019459Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l873xl8r-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.727968186Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.728006162Z caller=remote_instance_store.go:51 user=824501 slug=bendingspoons msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.728014905Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.727802087Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l83km652-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.727713303Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.727775232Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.727746826Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.727712767Z caller=remote_alert_sender.go:94 user=250150 slug=bizagi host=bizagi-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.117.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=eddz4x9muf8qoa alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l81juxb5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.727595722Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l81juxb5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.727567122Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l81juxb5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.727459181Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7y4k54z-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.72739744Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=618024 slug=aptosdev instance= t=2024-05-29T13:44:13.727299488Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.727339188Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7y4k54z-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.727241189Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=618024 slug=aptosdev t=2024-05-29T13:44:13.727203507Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7vyqqxp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.727124617Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.727153637Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7qkiokg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726901985Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.72689944Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.726813231Z caller=remote_instance_store.go:51 user=307001 slug=hirerightdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.726860898Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:13.726801384Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + 
logger=ngalert.state.manager.persist user=438855 slug=teckresources t=2024-05-29T13:44:13.726762568Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.529776ms + level=debug ts=2024-05-29T13:44:13.726694059Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7myymkq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726710903Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7myymkq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726647662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.726703776Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=471861 slug=planetstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.72667738Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7h8rsqe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726462961Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7h8rsqe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.72643582Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7g0rjmk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726298369Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.72615859Z caller=ruler.go:522 msg="tenant is owned by this instance" user=313957 slug=regdipasupil344 groups=0 + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners 
t=2024-05-29T13:44:13.726248496Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.726205707Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:13.726190796Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7g0rjmk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726168437Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.726160144Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7fcr8mg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726124537Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7fcr8mg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726097987Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7fcr8mg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.726055156Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.726113891Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.726110504Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7fcr8mg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725985496Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.726028085Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7fbx42b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725890035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7fbx42b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725845204Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7fbx42b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725816054Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l75lofew-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725663712Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l75lofew-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725522001Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l71fs8wf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.7254505Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.725334929Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l71fs8wf-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725374939Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l71fs8wf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725302458Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l7118f79-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.725260178Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.725231263Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.724961126Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6z3y0bq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.724953585Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.724710139Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6s4jc59-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.724739023Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6pwifar-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.724399929Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.724316699Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-l6n56mtp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.724105036Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6n56mtp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.724072596Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6n56mtp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.724014705Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.724024321Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.723977886Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:13.723900581Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6k9ylqi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.723954614Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.723729878Z caller=client.go:80 msg="creating client for grafana instance" user=301946 addr=dns:///rrrr-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.scheduler user=70430 slug=dapperlabs version=1 fingerprint=7b43e498997d0788 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.723775656Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.723408508s EvaluationString:}]" duration=23.879901ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6k9ylqi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.723872124Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6k9ylqi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.723834553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l6k9ylqi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.723801313Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.723799793Z caller=ruler.go:522 msg="tenant is owned by this instance" user=428033 slug=pointcontents groups=0 + level=info component=discovery ts=2024-05-29T13:44:13.723781418Z caller=client.go:80 msg="creating client for grafana instance" user=637318 addr=dns:///samjewellprodausoutheast0-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5tz04i7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.723758082Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.723756584Z caller=client.go:80 msg="creating client for grafana instance" user=536482 addr=dns:///rtdex-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.723695268Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.723655597Z caller=client.go:80 msg="creating client for grafana instance" user=397315 addr=dns:///royportas-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.723652519Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5tz04i7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.723585381Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.723572247Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=624945 slug=phishbate + level=debug ts=2024-05-29T13:44:13.723457992Z caller=remote_instance_store.go:51 user=504140 slug=chipotlestg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.723215186Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=112732 
slug=gleamer t=2024-05-29T13:44:13.723398332Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=472647 slug=planet instance="metric.name=value_num_undelivered_messages_max_max" t=2024-05-29T13:44:13.723389388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5m8jjju-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.723255517Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:13.723332505Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=438185 slug=nodeinfra instance="__name__=up, chain=APT, deployment=production, instance=aptosmainnetvfn.mirny.io:9101, job=prod-APT-mainnet-vfn, network=mainnet, servicetype=validator_fullnode" t=2024-05-29T13:44:13.722957607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:13.722916221Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="chain=APT" + logger=ngalert.state.manager.persist user=346766 slug=checklyhq t=2024-05-29T13:44:13.723233725Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=346766 slug=checklyhq instance= t=2024-05-29T13:44:13.723218236Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.723011105Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:13.72284421Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.754695ms + logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:13.72277798Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="chain=APT" + logger=ngalert.state.manager user=438185 slug=nodeinfra instance="__name__=up, chain=APT, cloud=OVH, deployment=production, instance=131.153.13.19:9101, job=prod-APT-testnet-fullnode, network=testnet, node_name=prod_APT_testnet_Fullnode_rbx_1, region=roubaix, servicetype=fullnode" t=2024-05-29T13:44:13.722692687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Westerly, STS_BMS=RTACSOLAR" t=2024-05-29T13:44:13.722684226Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Westerly, STS_BMS=RTACSOLAR" t=2024-05-29T13:44:13.722667391Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.72241886Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.722557718Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=438185 slug=nodeinfra t=2024-05-29T13:44:13.722519503Z level=debug msg="State manager processing evaluation results" resultCount=3 + logger=ngalert.state.manager user=524410 slug=syso instance="STS=Waterford, STS_BMS=RTACSOLAR" t=2024-05-29T13:44:13.722528081Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=548157 slug=kushkiprod t=2024-05-29T13:44:13.7223105Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.722150931Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=316418 slug=workmotion instance="FunctionName=CallInternalSchemaMigrationApiBeta" t=2024-05-29T13:44:13.721891828Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:13.72169471Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=316418 slug=workmotion version=1 fingerprint=ddf33f5c8843cf59 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.721578273Z level=debug msg="Alert rule evaluated" results="[{Instance:FunctionName=CallInternalSchemaMigrationApiBeta State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:FunctionName=CallInternalSchemaMigrationApiBeta Value:0xc005c834d8} C:{Var:C Labels:FunctionName=CallInternalSchemaMigrationApiBeta Value:0xc005c83500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.721073943s EvaluationString:[ var='B' labels={FunctionName=CallInternalSchemaMigrationApiBeta} value=0 ], [ var='C' labels={FunctionName=CallInternalSchemaMigrationApiBeta} value=0 ]}]" duration=26.21491ms + logger=ngalert.state.manager.persist user=542894 slug=aize t=2024-05-29T13:44:13.721575475Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=542894 slug=aize instance= t=2024-05-29T13:44:13.721546583Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=542894 slug=aize t=2024-05-29T13:44:13.721505869Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:13.721107991Z caller=remote_alert_sender.go:94 user=98483 slug=nocgpschile host=nocgpschile-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.95.99:10000 msg="sending alerts to 
grafana" rule_org_id=1 rule_uid=e6d2a1fe-5b91-4efa-ad1b-c348b3366ccc alerts=1 + level=info ts=2024-05-29T13:44:13.720741903Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.51.155:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edlon1y0ly4g0d alerts=1 + level=info component=discovery ts=2024-05-29T13:44:13.720707332Z caller=client.go:80 msg="creating client for grafana instance" user=396973 addr=dns:///rogueasianoc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.720655219Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.720631775Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=394445 slug=ozrust + level=debug ts=2024-05-29T13:44:13.720406562Z caller=remote_instance_store.go:51 user=173730 slug=nikon msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=843304 slug=ppcgroup instance= t=2024-05-29T13:44:13.720473814Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:13.720297161Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=843304 slug=ppcgroup t=2024-05-29T13:44:13.720417613Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=708873 slug=soultv t=2024-05-29T13:44:13.72033588Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.591483ms + logger=ngalert.scheduler user=173730 slug=nikon version=3 fingerprint=05e8ee7ec9ea56e2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.720161153Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.719651925s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=133.295007ms + level=debug ts=2024-05-29T13:44:13.720239938Z caller=ruler.go:522 msg="tenant is owned by this instance" user=318831 slug=grafanavianet groups=18 + level=debug ts=2024-05-29T13:44:13.720136766Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.historian backend=loki user=337951 slug=pawapay t=2024-05-29T13:44:13.720019776Z level=debug msg="Done saving alert state history batch" + level=info ts=2024-05-29T13:44:13.719696477Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.244.39:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhspyntvgu8e alerts=1 + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.719595071Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.661985ms + level=debug ts=2024-05-29T13:44:13.719292253Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.719131912Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.718957629Z caller=client.go:80 msg="creating client for grafana instance" 
user=647401 addr=dns:///riodiep-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.718822569Z caller=client.go:80 msg="creating client for grafana instance" user=509766 addr=dns:///rickeyw-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.718750907Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.718471931Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.718262638Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.718181437Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.718071372Z caller=client.go:80 msg="creating client for grafana instance" user=617704 addr=dns:///richoaaa-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.718024794Z caller=ruler.go:522 msg="tenant is owned by this instance" user=424337 slug=pandarust groups=0 + level=debug ts=2024-05-29T13:44:13.717951791Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.7178855Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.717864587Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.717755645Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.717438043Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.717509653Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.717325043Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.71707421Z caller=client.go:80 msg="creating client for grafana instance" user=544318 addr=dns:///redphasetechdemo-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.717039503Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:13.716959227Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datacenter=us-east-1" t=2024-05-29T13:44:13.71684089Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:13.716627777Z level=debug msg="State manager processing evaluation results" resultCount=3 + level=debug ts=2024-05-29T13:44:13.716608483Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.716548678Z caller=remote_instance_store.go:51 user=319327 slug=cvi msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=workflows, cluster=lmnd-sandbox-us-east-1, container=kube-state-metrics, deployment=workflows, endpoint=http, instance=10.32.87.231:8080, job=kube-state-metrics, namespace=sandbox, pod=kube-state-metrics-6c795d5489-lb4vc, region=us-east-1, service=kube-state-metrics, stage=sandbox" t=2024-05-29T13:44:13.716525802Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=319327 slug=cvi t=2024-05-29T13:44:13.716499561Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.716200128Z caller=ruler.go:522 msg="tenant is owned by this instance" user=392371 slug=nypd groups=0 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.71620644Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.716000895Z caller=ruler.go:522 msg="tenant is owned by this instance" user=385698 slug=nuaays groups=0 + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="application=PMT" t=2024-05-29T13:44:13.71572795Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="application=PIV" t=2024-05-29T13:44:13.715651908Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.715536947Z caller=ruler.go:522 msg="tenant is owned by this instance" user=458781 slug=katchassets groups=5 + logger=ngalert.state.manager user=336655 slug=odigeoconnect instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.715383651Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=336655 slug=odigeoconnect instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.715328114Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="application=OSE" t=2024-05-29T13:44:13.715458144Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=336655 slug=odigeoconnect instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.71529546Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.715252094Z caller=remote_instance_store.go:51 user=713299 slug=btcnonprod msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=336655 slug=odigeoconnect version=44 fingerprint=bac00d402a0f0c0d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.715054348Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.714688706s EvaluationString:}]" duration=18.995177ms + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:13.715205671Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.810572ms + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="application=CTS" t=2024-05-29T13:44:13.715075425Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="application=ALMA" t=2024-05-29T13:44:13.714800688Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.714814852Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling 
SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714783472Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714603696Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=884866 slug=cnonumerique version=4 fingerprint=c453ea762845cc20 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.714387468Z level=debug msg="Alert rule evaluated" results="[{Instance:application=ALMA State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=ALMA Value:0xc01949f7f0} C:{Var:C Labels:application=ALMA Value:0xc01949f810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713929213s EvaluationString:[ var='A' labels={application=ALMA} value=2 ], [ var='C' labels={application=ALMA} value=0 ]} {Instance:application=APIICV State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=APIICV Value:0xc01949f850} C:{Var:C Labels:application=APIICV Value:0xc01949f870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713936453s EvaluationString:[ var='A' labels={application=APIICV} value=5 ], [ var='C' labels={application=APIICV} value=0 ]} {Instance:application=APIIDV State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=APIIDV Value:0xc01949f8d0} C:{Var:C Labels:application=APIIDV Value:0xc01949f8b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713939024s EvaluationString:[ var='A' labels={application=APIIDV} value=3 ], [ var='C' labels={application=APIIDV} value=0 ]} {Instance:application=CTS State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=CTS Value:0xc01949f910} C:{Var:C Labels:application=CTS Value:0xc01949f930}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713943344s EvaluationString:[ var='A' labels={application=CTS} value=1 ], [ var='C' labels={application=CTS} value=0 ]} {Instance:application=DAUPHINECLOUD State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=DAUPHINECLOUD Value:0xc01949f988} C:{Var:C Labels:application=DAUPHINECLOUD Value:0xc01949f980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713945544s EvaluationString:[ var='A' labels={application=DAUPHINECLOUD} value=7 ], [ var='C' labels={application=DAUPHINECLOUD} value=0 ]} {Instance:application=ICSUCLOUD State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=ICSUCLOUD Value:0xc01949f9f0} C:{Var:C Labels:application=ICSUCLOUD Value:0xc01949f9f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713948454s EvaluationString:[ var='A' labels={application=ICSUCLOUD} value=3 ], [ var='C' labels={application=ICSUCLOUD} value=0 ]} {Instance:application=MONA State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:application=MONA Value:0xc01949fa50} C:{Var:C Labels:application=MONA Value:0xc01949fa70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713950604s EvaluationString:[ var='A' labels={application=MONA} value=96 ], [ var='C' labels={application=MONA} value=1 ]} {Instance:application=OSE State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=OSE Value:0xc01949fac0} C:{Var:C Labels:application=OSE Value:0xc01949faf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713952944s EvaluationString:[ var='A' labels={application=OSE} value=1 ], [ var='C' labels={application=OSE} value=0 ]} {Instance:application=PDEO 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=PDEO Value:0xc01949fb40} C:{Var:C Labels:application=PDEO Value:0xc01949fb60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713954894s EvaluationString:[ var='A' labels={application=PDEO} value=1 ], [ var='C' labels={application=PDEO} value=0 ]} {Instance:application=PIV State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=PIV Value:0xc01949fba0} C:{Var:C Labels:application=PIV Value:0xc01949fbc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713956854s EvaluationString:[ var='A' labels={application=PIV} value=1 ], [ var='C' labels={application=PIV} value=0 ]} {Instance:application=PMT State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:application=PMT Value:0xc01949fc10} C:{Var:C Labels:application=PMT Value:0xc01949fc30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.713960484s EvaluationString:[ var='A' labels={application=PMT} value=1 ], [ var='C' labels={application=PMT} value=0 ]}]" duration=40.350462ms + level=debug ts=2024-05-29T13:44:13.71447122Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714343444Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714040213Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714238046Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714179613Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714113295Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.714056059Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.713968223Z caller=client.go:80 msg="creating client for grafana instance" user=547513 addr=dns:///r88-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.713912629Z caller=ruler.go:522 msg="tenant is owned by this instance" user=342874 slug=nomadicox groups=0 + level=debug ts=2024-05-29T13:44:13.713804522Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:13.713835254Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.713676502Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.713465865Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.713310172Z caller=remote_image_capturer.go:54 user=288032 slug=dapperlabssre rule_org_id=1 rule_uid=hx56sgPVz dashboard=1nFDDBEluhDE panel=69 msg="rendering alert image with grafana" + level=debug ts=2024-05-29T13:44:13.713181227Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.713150639Z level=debug msg="State manager 
processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=172.26.10.98:7979, job=external-dns, namespace=external-dns, pod=external-dns-bb56c4dfb-5g5k7, provider=aws, redpanda_id=clo8v8lhac3fc8rlkmjg, service=external-dns" t=2024-05-29T13:44:13.713134495Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.712988303Z caller=client.go:80 msg="creating client for grafana instance" user=345253 addr=dns:///r3x-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.712968529Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=606134 slug=krzko + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=172.21.10.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-6576745f55-8g9t4, provider=gcp, redpanda_id=cloglqhpeif0bijutdr0, service=external-dns" t=2024-05-29T13:44:13.712914395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=172.21.10.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-6576745f55-8g9t4, provider=gcp, redpanda_id=cloglqhpeif0bijutdr0, service=external-dns" t=2024-05-29T13:44:13.712900952Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.71274329Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.712632998Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.712717773Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.712449077Z caller=client.go:80 msg="creating client for grafana instance" user=723580 addr=dns:///pierrecabral-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=172.16.42.19:7979, job=external-dns, namespace=external-dns, pod=external-dns-64f46b5b4f-dsqc8, provider=gcp, redpanda_id=ckrvkrd3rmo54fot1r1g, service=external-dns" t=2024-05-29T13:44:13.712694971Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=434891 slug=webdeveloper instance="app=limberapp, cluster=cloud-heroku" t=2024-05-29T13:44:13.712595621Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=info component=discovery ts=2024-05-29T13:44:13.712569115Z caller=client.go:80 msg="creating client for grafana instance" user=602963 addr=dns:///provectusalgae-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=434891 slug=webdeveloper instance="app=limberapp, cluster=cloud-heroku" t=2024-05-29T13:44:13.712577799Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.712532378Z caller=remote_instance_store.go:51 user=154996 slug=veovo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="container=external-dns, endpoint=http, instance=11.4.24.38:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f7fc75f7f-8rnxx, provider=gcp, redpanda_id=cp77jim6fslkb963td8g, service=external-dns" t=2024-05-29T13:44:13.712549346Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.712473476Z caller=client.go:80 msg="creating client for grafana instance" user=303552 addr=dns:///posmalaysiaapm-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.712454528Z caller=client.go:80 msg="creating client for grafana instance" user=313982 addr=dns:///polarm62-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.712451651Z caller=client.go:80 msg="creating client for grafana instance" user=428033 addr=dns:///pointcontents-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=434891 slug=webdeveloper t=2024-05-29T13:44:13.712486787Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info component=discovery ts=2024-05-29T13:44:13.712384705Z caller=client.go:80 msg="creating client for grafana instance" user=463340 addr=dns:///phily-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712212811Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712201411Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712196211Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:13.712342603Z caller=client.go:80 msg="creating client for grafana instance" user=518980 addr=dns:///permads-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712135509Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712130009Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712111709Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=warn ts=2024-05-29T13:44:13.712195795Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=668638 slug=mappetella + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712067008Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712025307Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.712005206Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711983606Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.712159421Z caller=remote_instance_store.go:51 user=115097 slug=controlplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711962705Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=11.0.24.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-5d659f6df7-bctlf, provider=gcp, redpanda_id=co1l1l1pom78tp54qn90, service=external-dns" t=2024-05-29T13:44:13.712145012Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.710742474Z caller=ruler.go:522 msg="tenant is owned by this instance" user=317380 slug=justinlip groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711926404Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.710575745Z caller=ruler.go:522 msg="tenant is owned by this instance" user=321642 slug=netmakers groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711862002Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.712111259Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.710529487Z caller=ruler.go:522 msg="tenant is owned by this instance" user=622695 slug=niquist groups=1 + level=debug ts=2024-05-29T13:44:13.710440158Z caller=ruler.go:522 msg="tenant is owned by this instance" user=673133 slug=musgravegrafana groups=1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.7117794Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.71197206Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.7117429Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=100.88.9.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6ff8547c7c-sv7zj, provider=gcp, redpanda_id=cn1jn9k5ihvfjtmun7eg, service=external-dns" t=2024-05-29T13:44:13.711955357Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711726699Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.710163547Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=670694 slug=fwwc2023 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711678398Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711653997Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711595896Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711588196Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711574595Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711545495Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=100.88.16.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b765d966f-fcbb6, provider=gcp, redpanda_id=cnaa0i9028lf1qmgl5tg, service=external-dns" t=2024-05-29T13:44:13.711782069Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711524494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711509994Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711501393Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.7116727Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711483493Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711464593Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711454092Z level=debug msg="Setting next state" handler=resultNoData + level=info ts=2024-05-29T13:44:13.711564154Z caller=remote_alert_sender.go:94 user=337951 slug=pawapay 
host=pawapay-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.90.170:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=72dc9c46 alerts=1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711436292Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.711661328Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=100.88.152.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc645655-lg7zp, provider=gcp, redpanda_id=cost10i34627vtg2kmg0, service=external-dns" t=2024-05-29T13:44:13.711687353Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.711631571Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.711618381Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=642752 slug=mtest2023 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71135769Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711315089Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=100.88.136.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d7d9459f-4kxld, provider=gcp, redpanda_id=cnipfdrqu42smqf5rt40, service=external-dns" t=2024-05-29T13:44:13.711522078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711288788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711253587Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711230487Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711221487Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711177485Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711161685Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711148785Z level=debug msg="Setting next state" handler=resultNoData + 
logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711137484Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.711129184Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.8.6.225:7979, job=external-dns, namespace=external-dns, pod=external-dns-5bfccf748f-nh8rl, provider=aws, redpanda_id=cp17312j3085r2lqj4sg, service=external-dns" t=2024-05-29T13:44:13.711333353Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.710298136Z caller=ruler.go:522 msg="tenant is owned by this instance" user=734153 slug=mahisolutions groups=1 + level=debug ts=2024-05-29T13:44:13.711236596Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71096308Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.711260318Z caller=remote_instance_store.go:51 user=491157 slug=prd01wr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710937879Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710877478Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710865778Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=491157 slug=prd01wr instance= t=2024-05-29T13:44:13.711180325Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=337951 slug=pawapay t=2024-05-29T13:44:13.711117487Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.101395ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.8.2.117:7979, job=external-dns, namespace=external-dns, pod=external-dns-74dc857f6c-mrmss, provider=aws, redpanda_id=cotudq2l9050bvlc7kcg, service=external-dns" t=2024-05-29T13:44:13.711136945Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.711115577Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710714274Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710704474Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.71100868Z caller=remote_instance_store.go:51 
user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710670473Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.711036365Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710656872Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710636672Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710622472Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710612671Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71057167Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71055587Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71054397Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710512269Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.710912673Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.710887733Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710470568Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710457367Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.710883859Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710451167Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.710814923Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710382866Z level=debug 
msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710376865Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710317864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710266763Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.71.13.9:7979, job=external-dns, namespace=external-dns, pod=external-dns-5659c99f86-f62gt, provider=gcp, redpanda_id=ci8q04tu86fclp7lpl0g, service=external-dns" t=2024-05-29T13:44:13.710703818Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710255962Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710251062Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710234262Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710190861Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710185761Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71017176Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71016666Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71015496Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.71014096Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710130059Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.64.9.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-78fd59fbd8-b2pb5, provider=gcp, redpanda_id=ce9p9mb9tos7v1tuco70, 
service=external-dns" t=2024-05-29T13:44:13.710558476Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710123659Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.710510774Z caller=client.go:80 msg="creating client for grafana instance" user=604967 addr=dns:///dabbadev-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710100459Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710092458Z level=debug msg="Setting next state" handler=resultNoData + level=warn ts=2024-05-29T13:44:13.710460773Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=714317 slug=clgit + level=debug ts=2024-05-29T13:44:13.710428173Z caller=ruler.go:522 msg="tenant is owned by this instance" user=714317 slug=clgit groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710070158Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.710396373Z caller=ruler.go:522 msg="tenant is owned by this instance" user=498448 slug=chessmonitoring groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.710057857Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:13.710395181Z caller=client.go:80 msg="creating client for grafana instance" user=349537 addr=dns:///pangzlab-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709979956Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=warn ts=2024-05-29T13:44:13.710271671Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=496982 slug=chromabeams + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709946155Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709897953Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709886653Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.710245098Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.60.192.26:7979, job=external-dns, 
namespace=external-dns, pod=external-dns-7d675867d9-nxz96, provider=gcp, redpanda_id=cnc28jka4ff1g49hecf0, service=external-dns" t=2024-05-29T13:44:13.710256028Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709869253Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709842052Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709830652Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709809951Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709789651Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.709972737Z caller=ruler.go:522 msg="tenant is owned by this instance" user=670694 slug=fwwc2023 groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709784751Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=304032 slug=clearbanc t=2024-05-29T13:44:13.710106294Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70976785Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=304032 slug=clearbanc instance="clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-1" t=2024-05-29T13:44:13.710093505Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70974845Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=304032 slug=clearbanc t=2024-05-29T13:44:13.710055022Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="env=production-core" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.6.54.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-d5f5ddbbc-w86sq, provider=aws, redpanda_id=cpb4qli77h8g0iu4f7tg, service=external-dns" t=2024-05-29T13:44:13.710055282Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.710056572Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709718249Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709713149Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709707549Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709699849Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709633147Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709615446Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.709869268Z caller=client.go:80 msg="creating client for grafana instance" user=681975 addr=dns:///cyrillerobert-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.scheduler user=304032 slug=clearbanc version=39 fingerprint=409208ccf311410c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.709734812Z level=debug msg="Alert rule evaluated" results="[{Instance:clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-0 Value:0xc027ae5e50} C:{Var:C Labels:clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-0 Value:0xc027ae5ec0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.709379589s EvaluationString:[ var='B' labels={clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-0} value=1 ], [ var='C' labels={clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-0} value=0 ]} {Instance:clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:clearco_owner=infra-devs, 
clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-1 Value:0xc027ae5fb8} C:{Var:C Labels:clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-1 Value:0xc01058e020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.709394843s EvaluationString:[ var='B' labels={clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-1} value=1 ], [ var='C' labels={clearco_owner=infra-devs, clearco_service=prometheus, clearco_tier=t2, env=production-core, pod=prometheus-mon-kube-prometheus-stack-prometheus-1} value=0 ]}]" duration=14.706192ms + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709547545Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709541245Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.709775966Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=381707 slug=nandhakandasamy + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709527744Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.709774314Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709520744Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709515344Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.6.0.146:7979, job=external-dns, namespace=external-dns, pod=external-dns-597f465f54-g8jlg, provider=aws, redpanda_id=cgs1c8e984cnrsjmp320, service=external-dns" t=2024-05-29T13:44:13.709799475Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709510244Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709496043Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709490843Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709453142Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.6.0.146:7979, job=external-dns, namespace=external-dns, pod=external-dns-597f465f54-g8jlg, 
provider=aws, redpanda_id=cgs1c8e984cnrsjmp320, service=external-dns" t=2024-05-29T13:44:13.709786114Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709441642Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709396141Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.709709929Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709323239Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709277138Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.709635152Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709251937Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.58.128.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fb5746cdc-wnk8r, provider=gcp, redpanda_id=cl0h411k7kve5j7088hg, service=external-dns" t=2024-05-29T13:44:13.709631285Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.709579177Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.70961561Z caller=remote_alert_sender.go:94 user=212369 slug=finaloop host=finaloop-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.214:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=TAhjDKRVz alerts=1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709211136Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709183136Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709134234Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709085133Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.54.2.191:7979, job=external-dns, namespace=external-dns, pod=external-dns-9c9d77795-f9nhx, provider=aws, redpanda_id=ci4sa407e7rqqatd3hmg, service=external-dns" 
t=2024-05-29T13:44:13.709539552Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709075133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709052532Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709046132Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.709003531Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708911929Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708903829Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.53.128.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-d9cd7945f-c9t7f, provider=gcp, redpanda_id=cngh6crqu42smqf5qr1g, service=external-dns" t=2024-05-29T13:44:13.709437607Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.709424963Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=901996 slug=cmgas2stgneu + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708871628Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708828527Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708823227Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708815826Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708810626Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.709341863Z caller=ruler.go:522 msg="tenant is owned by this instance" user=901996 slug=cmgas2stgneu groups=0 + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.709305304Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=35.798473ms + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708798826Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708792826Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708783026Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.5.14.106:7979, job=external-dns, namespace=external-dns, pod=external-dns-675cc6b89-szqzb, provider=aws, redpanda_id=cj1ti4jube5f1u6qjfrg, service=external-dns" t=2024-05-29T13:44:13.709348617Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708749225Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708715824Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708705024Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708675023Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.709227691Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708657923Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708629522Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708614921Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708607521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=ddhkbrfewv7k0d, ref_id=A" t=2024-05-29T13:44:13.709139877Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=884866 slug=cnonumerique t=2024-05-29T13:44:13.709073255Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.5.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cf6446bcd-6vsb8, provider=aws, redpanda_id=ch341oogvg4l92oem7u0, service=external-dns" t=2024-05-29T13:44:13.709165762Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:13.708508519Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=884866 slug=cnonumerique version=10 fingerprint=2b362e220e6741f6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.708987994Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddhkbrfewv7k0d, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.708652298s EvaluationString:}]" duration=42.427438ms + level=info ts=2024-05-29T13:44:13.709124007Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.244.39:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhspzwx1hj4f alerts=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.5.0.114:7979, job=external-dns, namespace=external-dns, pod=external-dns-67dccdbb4c-tfxql, provider=aws, redpanda_id=cfl3m3aei3b2k79h2brg, service=external-dns" t=2024-05-29T13:44:13.709037441Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708462218Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708450817Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.709076858Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddbhspzwx1hj4f alerts=1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708418317Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708395316Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708384916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708354515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.708987338Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.267975ms + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708319814Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708306714Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708289813Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708265713Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708259513Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.41.192.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b6b9d968f-jdnlz, provider=gcp, redpanda_id=cnuvknbiuvqvkcfi59ig, service=external-dns" t=2024-05-29T13:44:13.708922136Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708218612Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708208011Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708181611Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70816601Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70816051Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.708779257Z caller=ruler.go:522 msg="tenant is owned by this instance" user=760367 slug=cluepoints groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70814261Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708122209Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.708806189Z caller=remote_instance_store.go:51 user=765907 slug=orangebarrelmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708100209Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708075008Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.708026707Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707982506Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + 
logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707973505Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707958205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707936204Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707858903Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707850902Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707828002Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707814501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.7077553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707705399Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707682998Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707672298Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707665298Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707653997Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707646897Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.708328942Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707508094Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707501494Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.30.11.7:7979, 
job=external-dns, namespace=external-dns, pod=external-dns-6f9b49f97d-b68ph, provider=gcp, redpanda_id=chhnf2abksi1fj6tastg, service=external-dns" t=2024-05-29T13:44:13.70823377Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707470493Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.708276866Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707452092Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707445892Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707440392Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707425992Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.708162617Z caller=remote_instance_store.go:51 user=856040 slug=kuady msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707409291Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707393991Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.708060551Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=622226 slug=cinemoz + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70736639Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=120621 slug=jdall instance= t=2024-05-29T13:44:13.708126451Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70734819Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70734309Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70733679Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.708106027Z caller=remote_instance_store.go:51 user=444728 slug=stgnextgen msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=120621 slug=jdall instance= t=2024-05-29T13:44:13.708105247Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager.persist user=444728 slug=stgnextgen 
t=2024-05-29T13:44:13.708053469Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=120621 slug=jdall t=2024-05-29T13:44:13.708087311Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=444728 slug=stgnextgen instance= t=2024-05-29T13:44:13.708035567Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707314189Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=error ts=2024-05-29T13:44:13.708032269Z caller=remote_rule_evaluator.go:110 user=120621 slug=jdall msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.scheduler user=120621 slug=jdall version=1 fingerprint=b83dd0f036be28c7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.708058612Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.775396ms + logger=ngalert.state.manager user=716519 slug=bradfordprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707923729Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707251687Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707245787Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.707948599Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707216587Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707206086Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=716519 slug=bradfordprod version=1 fingerprint=dc0a98c217945524 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.707805577Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.707543947s EvaluationString:}]" duration=9.008813ms + level=debug ts=2024-05-29T13:44:13.707768981Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707159785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=444728 slug=stgnextgen version=3 fingerprint=5bca03362a11e46d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.70776547Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.707532547s EvaluationString:}]" duration=255.471391ms + logger=ngalert.state.manager user=504140 slug=chipotlestg 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707073783Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707048182Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.707641776Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707026282Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.707003581Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706992181Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70697308Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70696808Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70695668Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70694648Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706934579Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706923179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706913379Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706891078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706885878Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706874878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706862378Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:13.706850777Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706839577Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.707230343Z caller=ruler.go:522 msg="tenant is owned by this instance" user=535765 slug=carenamics groups=0 + level=info component=discovery ts=2024-05-29T13:44:13.707239443Z caller=client.go:80 msg="creating client for grafana instance" user=617022 addr=dns:///couponsphinx-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.707195642Z caller=ruler.go:522 msg="tenant is owned by this instance" user=701088 slug=bykovas groups=0 + level=debug ts=2024-05-29T13:44:13.707104368Z caller=remote_instance_store.go:51 user=289377 slug=jochim msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.255.0.241:7979, job=external-dns, namespace=external-dns, pod=external-dns-847c5f468d-fsqxh, provider=aws, redpanda_id=cl0eao9k7kve5j7085rg, service=external-dns" t=2024-05-29T13:44:13.707213589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706747375Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706740875Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706712574Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.255.0.127:7979, job=external-dns, namespace=external-dns, pod=external-dns-6444fc7b96-kphpj, provider=aws, redpanda_id=cl0e1e9k7kve5j7085hg, service=external-dns" t=2024-05-29T13:44:13.707107641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706656273Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706650372Z level=debug msg="Setting next state" handler=resultNoData + level=warn ts=2024-05-29T13:44:13.70698704Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=715436 slug=canerakar + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.255.0.101:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d65d9764-xf4vp, provider=aws, redpanda_id=cl0ceg1k7kve5j708470, service=external-dns" t=2024-05-29T13:44:13.706976684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70655307Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:13.706912839Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.742053ms + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706482468Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:13.706808012Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.485691ms + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706435367Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706426467Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706420067Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706414967Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706407966Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706387566Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.70678957Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706316864Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706285563Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706279963Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.706612986Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.706748838Z caller=client.go:80 msg="creating client for grafana instance" user=766347 addr=dns:///constrat-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706269063Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=708873 slug=soultv t=2024-05-29T13:44:13.706737458Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug 
ts=2024-05-29T13:44:13.70665484Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706257163Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.253.65.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-b4db9dd4f-vbwrk, provider=gcp, redpanda_id=cmk3k6vj7la47b8ftnbg, service=external-dns" t=2024-05-29T13:44:13.706677071Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706176561Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70616346Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706110859Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706062158Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.706486699Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706030157Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.706012256Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705998556Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705967655Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.706455675Z caller=remote_instance_store.go:51 user=314067 slug=itsme msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.706431736Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705958755Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705949655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705910854Z level=debug msg="Setting next state" handler=resultNoData + 
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705903154Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705857553Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705834852Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.706322729Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705814452Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705774051Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70576655Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.706255184Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=warn ts=2024-05-29T13:44:13.706217433Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=753954 slug=carmela
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70574705Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70574055Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705699149Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.706100595Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.647655ms
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705693849Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705672248Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705623347Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.221.8.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-84b5c4658c-8nb67, provider=gcp, redpanda_id=co62vfa6i63ttaf6mrl0, service=external-dns" t=2024-05-29T13:44:13.706030735Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705519144Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705505944Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705497244Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705490543Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705476443Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705470643Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705452442Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.220.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-969fb454f-tz8hk, provider=gcp, redpanda_id=cokiaqnb4g10p5c9b8k0, service=external-dns" t=2024-05-29T13:44:13.705908938Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705447642Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.220.8.156:7979, job=external-dns, namespace=external-dns, pod=external-dns-55c44464f9-hw8ch, provider=gcp, redpanda_id=colb09f6nielhbep3rv0, service=external-dns" t=2024-05-29T13:44:13.705815608Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70533714Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.21.240.44:7979, job=external-dns, namespace=external-dns, pod=external-dns-598985fc64-dnv9d, provider=aws, redpanda_id=cfvrtf1ksfn7un5jhlm0, service=external-dns" t=2024-05-29T13:44:13.705728014Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705285838Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705263338Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705233437Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705227937Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705220337Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.705484826Z caller=ruler.go:522 msg="tenant is owned by this instance" user=499760 slug=cargatserv groups=0
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705180836Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.200.0.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-55994cc7cd-x67tg, provider=aws, redpanda_id=cfhc16nu1f0o5qipdvp0, service=external-dns" t=2024-05-29T13:44:13.705533739Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705065933Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.705018632Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.20.240.157:7979, job=external-dns, namespace=external-dns, pod=external-dns-dddd4478b-b8xvs, provider=aws, redpanda_id=cfddoje2gj23h4urbbg0, service=external-dns" t=2024-05-29T13:44:13.705416183Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704981131Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.2.8.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-6595d44b88-gb2h4, provider=aws, redpanda_id=ckor43qoad62ru3usgg0, service=external-dns" t=2024-05-29T13:44:13.705345045Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=806229 slug=simplisafe t=2024-05-29T13:44:13.705121268Z level=debug msg="Skip rule evaluation because it is paused"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70493743Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.705273909Z caller=remote_instance_store.go:51 user=620449 slug=pocketbitcoin msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704846827Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704836327Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704814627Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.2.4.179:7979, job=external-dns, namespace=external-dns, pod=external-dns-5dd474d59f-55d29, provider=aws, redpanda_id=cih9b8so6oo60d8402g0, service=external-dns" t=2024-05-29T13:44:13.705258988Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=620449 slug=pocketbitcoin instance="__name__=lnpayout_synced, app=lnpayout-staging, cluster=cloud, instance=10.120.2.16:8081, job=lnpayout, namespace=lnpayout-staging" t=2024-05-29T13:44:13.705209867Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704781826Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704752925Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704716924Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704706724Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.2.0.49:7979, job=external-dns, namespace=external-dns, pod=external-dns-7bc678f875-dv7r4, provider=aws, redpanda_id=cihcamco6oo60d8405d0, service=external-dns" t=2024-05-29T13:44:13.705153394Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704657423Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704605121Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704583021Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70456942Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70454632Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70454032Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.199.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-69b9b588bc-cq22w, provider=gcp, redpanda_id=cnduflca4ff1g49heso0, service=external-dns" t=2024-05-29T13:44:13.70502108Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704500319Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.704975605Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=432323 slug=lithic instance="QueueName=ledger-xfer-consumer-v1-sandbox-dlq" t=2024-05-29T13:44:13.704960853Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704465818Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=102312 slug=gameanalytics t=2024-05-29T13:44:13.704954274Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704457718Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.70491916Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=102312 slug=gameanalytics instance="datasource_uid=grafanacloud-graphite, ref_id=B" t=2024-05-29T13:44:13.704935735Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.704860863Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704450217Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=102312 slug=gameanalytics instance="datasource_uid=grafanacloud-graphite, ref_id=B" t=2024-05-29T13:44:13.704928017Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:13.704897887Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=102312 slug=gameanalytics version=1 fingerprint=63a087d992eaa90b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.704840953Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-graphite, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.704562428s EvaluationString:}]" duration=54.113306ms
+level=debug ts=2024-05-29T13:44:13.704915828Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704413417Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=534858f224aa85b7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.7047527Z level=debug msg="Alert rule evaluated" results="[{Instance:QueueName=ledger-xfer-consumer-v1-sandbox-dlq State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:QueueName=ledger-xfer-consumer-v1-sandbox-dlq Value:0xc04277bb88} C:{Var:C Labels:QueueName=ledger-xfer-consumer-v1-sandbox-dlq Value:0xc04277bb80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.704389961s EvaluationString:[ var='B' labels={QueueName=ledger-xfer-consumer-v1-sandbox-dlq} value=0 ], [ var='C' labels={QueueName=ledger-xfer-consumer-v1-sandbox-dlq} value=0 ]}]" duration=79.974476ms
+level=debug ts=2024-05-29T13:44:13.704869149Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704309414Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704300114Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704291613Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704267513Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704246812Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.704724026Z caller=remote_instance_store.go:51 user=61907 slug=fullstory msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704192811Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704186611Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=61907 slug=fullstory instance= t=2024-05-29T13:44:13.704641219Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704181411Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704174611Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70416351Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70415351Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70414371Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704131209Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704103009Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704078208Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.18.8.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-d54d6f669-hmwcq, provider=gcp, redpanda_id=cmshck3mgrq4nh9abbmg, service=external-dns" t=2024-05-29T13:44:13.704515299Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.704044807Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.178.0.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bdcd878b5-vj6fj, provider=gcp, redpanda_id=cmjv9aei35o3jbomnhe0, service=external-dns" t=2024-05-29T13:44:13.70442825Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703987706Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.178.0.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bdcd878b5-vj6fj, provider=gcp, redpanda_id=cmjv9aei35o3jbomnhe0, service=external-dns" t=2024-05-29T13:44:13.704418148Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=f901086b-e83c-4767-8689-f9c5848eaf68, ref_id=B" t=2024-05-29T13:44:13.704353399Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=f901086b-e83c-4767-8689-f9c5848eaf68, ref_id=B" t=2024-05-29T13:44:13.704343794Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703924004Z level=debug msg="Keeping state" state=Normal
+level=debug component=discovery ts=2024-05-29T13:44:13.704259715Z caller=retry.go:58 user=328744 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703884703Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.167.9.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-d6b47bc46-cz9wh, provider=gcp, redpanda_id=chllqaunch049g9dc0t0, service=external-dns" t=2024-05-29T13:44:13.704357855Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703830702Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703823502Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.704162559Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703789901Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.7037664Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.7037517Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703690698Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703685798Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703679798Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703669598Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703539995Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703523994Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703518894Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703512994Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703501694Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703489893Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703476593Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703443192Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703435492Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703400691Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703393291Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.703896345Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703381891Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.16.8.3:7979, job=external-dns, namespace=external-dns, pod=external-dns-7489b55764-htmph, provider=gcp, redpanda_id=cg7li0b7415mavl54ibg, service=external-dns" t=2024-05-29T13:44:13.703953687Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.16.8.3:7979, job=external-dns, namespace=external-dns, pod=external-dns-7489b55764-htmph, provider=gcp, redpanda_id=cg7li0b7415mavl54ibg, service=external-dns" t=2024-05-29T13:44:13.703939898Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70335589Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70335019Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70334319Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70333679Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703321489Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703311389Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703273888Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703256588Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.703696117Z caller=remote_instance_store.go:51 user=713299 slug=btcnonprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703250488Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703245087Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703223387Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703194986Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703161385Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703149485Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703139885Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703133285Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703127184Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager.persist user=713299 slug=btcnonprod t=2024-05-29T13:44:13.703661607Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703115484Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703095584Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.13.0.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-856b465cdf-4t2pm, provider=aws, redpanda_id=cp35ago256rbcqi0bh40, service=external-dns" t=2024-05-29T13:44:13.703633593Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=713299 slug=btcnonprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703578235Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703074383Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.13.0.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-856b465cdf-4t2pm, provider=aws, redpanda_id=cp35ago256rbcqi0bh40, service=external-dns" t=2024-05-29T13:44:13.70361914Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703060583Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703052783Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.703023782Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702990781Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70296898Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.703249752Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702921779Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702915279Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702901979Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.703223222Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702881778Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702869678Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702798476Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702768675Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702759475Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.703130605Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702745675Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702731275Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702724374Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702719074Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.702984817Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702665673Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702631572Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702617972Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.703040877Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702572771Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70255817Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70255017Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.108.80.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-55ccd8f5d5-5t9s9, provider=gcp, redpanda_id=cp736qcgu42nj1hpqak0, service=external-dns" t=2024-05-29T13:44:13.703021531Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.702942065Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.702916602Z caller=client.go:80 msg="creating client for grafana instance" user=727014 addr=dns:///cminformatiktesting-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702494369Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702481668Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.702925146Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702465068Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.702861124Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.702865074Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.105.161.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-648c87b5b9-fzl82, provider=gcp, redpanda_id=cn1vdqo4qu1uu9j2h870, service=external-dns" t=2024-05-29T13:44:13.702916033Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.702790062Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702416567Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702396466Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702383766Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.702733448Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.105.0.204:7979, job=external-dns, namespace=external-dns, pod=external-dns-85b5667dc9-jps7c, provider=aws, redpanda_id=cm5qpt2jfjl2gs1fnno0, service=external-dns" t=2024-05-29T13:44:13.702830755Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702370266Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702363365Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.10.10.93:7979, job=external-dns, namespace=external-dns, pod=external-dns-58dc4454c7-qg4l9, provider=aws, redpanda_id=cjhl5ggccm8eecla1tog, service=external-dns" t=2024-05-29T13:44:13.702749584Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702310364Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager.persist user=244418 slug=of t=2024-05-29T13:44:13.702704084Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=244418 slug=of instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702688491Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+level=info component=discovery ts=2024-05-29T13:44:13.7026741Z caller=client.go:80 msg="creating client for grafana instance" user=742098 addr=dns:///cminformatiktest-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.702599828Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702248162Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702227662Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=244418 slug=of version=1 fingerprint=ac9071e1d8c2346c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.702575577Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.702206044s EvaluationString:}]" duration=18.425608ms
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702219162Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=warn ts=2024-05-29T13:44:13.702603699Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=765761 slug=bfv
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.1.8.240:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c7c9cb9-wwkg6, provider=aws, redpanda_id=cp5ml63ctouaqdnu93c0, service=external-dns" t=2024-05-29T13:44:13.702625602Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702195161Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.1.8.240:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c7c9cb9-wwkg6, provider=aws, redpanda_id=cp5ml63ctouaqdnu93c0, service=external-dns" t=2024-05-29T13:44:13.702612573Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70214926Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70214176Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.1.4.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-5dbf544659-k9f2z, provider=aws, redpanda_id=cih9ajco6oo60d8402eg, service=external-dns" t=2024-05-29T13:44:13.702530807Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.702467156Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.702392522Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702099159Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702078958Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.1.0.217:7979, job=external-dns, namespace=external-dns, pod=external-dns-8f4657b5f-c9tsw, provider=aws, redpanda_id=cihc9v4uqhmje92v7e7g, service=external-dns" t=2024-05-29T13:44:13.702347748Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702012657Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.702004256Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701996556Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701983356Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.702274625Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701956655Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701941155Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.702110276Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.702125294Z caller=client.go:80 msg="creating client for grafana instance" user=760367 addr=dns:///cluepoints-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.9.9:7979, job=external-dns, namespace=external-dns, pod=external-dns-54987f65c-27rhr, provider=gcp, redpanda_id=cfhui4vu1f0o5qipe0e0, service=external-dns" t=2024-05-29T13:44:13.702234961Z level=debug msg="Setting next state" handler=resultNormal
+level=warn ts=2024-05-29T13:44:13.702103494Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=530613 slug=bert3
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701885453Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701872853Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.700713681Z caller=ruler.go:522 msg="tenant is owned by this instance" user=530613 slug=bert3 groups=0
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.9.30:7979, job=external-dns, namespace=external-dns, pod=external-dns-b9fcd8d5d-ldm8w, provider=gcp, redpanda_id=cmnepnqg5arbtkvmolo0, service=external-dns" t=2024-05-29T13:44:13.702162826Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701862053Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701830952Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.9.17:7979, job=external-dns, namespace=external-dns, pod=external-dns-b8d96d4cc-hch42, provider=gcp, redpanda_id=cfhbvgiei3b2k79h28d0, service=external-dns" t=2024-05-29T13:44:13.702047135Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701686348Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701681048Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701664948Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=info component=discovery ts=2024-05-29T13:44:13.701812791Z caller=client.go:80 msg="creating client for grafana instance" user=554902 addr=dns:///cloutomate-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.701845505Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701653048Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701612847Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701601846Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701572246Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701560845Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701544045Z level=debug msg="Setting next state" handler=resultNoData
+level=info component=discovery ts=2024-05-29T13:44:13.70167869Z caller=client.go:80 msg="creating client for grafana instance" user=650163 addr=dns:///cloudfuel-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701472543Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-57c97fbc4-htvf8, provider=gcp, redpanda_id=cfhtq3vu1f0o5qipe0c0, service=external-dns" t=2024-05-29T13:44:13.701664393Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-57c97fbc4-htvf8, provider=gcp, redpanda_id=cfhtq3vu1f0o5qipe0c0, service=external-dns" t=2024-05-29T13:44:13.70165097Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.701512053Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.701586412Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70135244Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70132824Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701302639Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701242837Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-846c84c6f4-gskpj, provider=gcp, redpanda_id=cpaqakb01cjus260u92g, service=external-dns" t=2024-05-29T13:44:13.701475577Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.701470113Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.701441259Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701217637Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701177436Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=info component=discovery ts=2024-05-29T13:44:13.700821682Z caller=client.go:80 msg="creating client for grafana instance" user=622226 addr=dns:///cinemoz-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701136835Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=warn ts=2024-05-29T13:44:13.701297987Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=744941 slug=buzzinga
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701077433Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.701030032Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-58d8bd5d74-z7z9d, provider=gcp, redpanda_id=cp76akm6fslkb963tc50, service=external-dns" t=2024-05-29T13:44:13.701300112Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700979731Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70092873Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-58d8bd5d74-z7z9d, provider=gcp, redpanda_id=cp76akm6fslkb963tc50, service=external-dns" t=2024-05-29T13:44:13.701284744Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700923529Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700903829Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700867928Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-5669cbc655-2gpdn, provider=gcp, redpanda_id=cmc5ur2bu6qj8qcjc5rg, service=external-dns" t=2024-05-29T13:44:13.701205411Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-5669cbc655-2gpdn, provider=gcp, redpanda_id=cmc5ur2bu6qj8qcjc5rg, service=external-dns" t=2024-05-29T13:44:13.701191563Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700799626Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700786626Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700727625Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700690724Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700668223Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700634922Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.700969787Z caller=remote_image_capturer.go:33 user=337951 slug=pawapay rule_org_id=1 rule_uid=72dc9c46 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700627922Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:13.700954052Z level=debug msg="Changing state" previous_state=Pending next_state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700583721Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.700917083Z caller=ruler.go:522 msg="tenant is owned by this instance" user=493344 slug=axhmonitor groups=3
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70055112Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=info component=discovery ts=2024-05-29T13:44:13.700857482Z caller=client.go:80 msg="creating client for grafana instance" user=828431 addr=dns:///cisci-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.700831882Z caller=ruler.go:522 msg="tenant is owned by this instance" user=522793 slug=borcsokj groups=0
+level=warn ts=2024-05-29T13:44:13.700827182Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=529396 slug=baqend
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70052742Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.700801582Z caller=ruler.go:522 msg="tenant is owned by this instance" user=529396 slug=baqend groups=0
+level=debug ts=2024-05-29T13:44:13.700791426Z caller=remote_instance_store.go:51 user=320778 slug=omegaai msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=337951 slug=pawapay version=8 fingerprint=083739a75bd1bd7a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.700702515Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels:service=deposit-service Value:0xc007e17c18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.700167411s EvaluationString:[ var='B0' metric='Value' labels={service=deposit-service} value=5.333333333333333 ]}]" duration=12.937052ms
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700503619Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700497919Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-5d4b897b97-7f28m, provider=gcp, redpanda_id=co2o3ha2q0l3vqpek9h0, service=external-dns" t=2024-05-29T13:44:13.700803005Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700455218Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700434117Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:13.70072889Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=320778 slug=omegaai t=2024-05-29T13:44:13.700665252Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700401616Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700389816Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700377416Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700365715Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700350515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700330115Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-799455496f-92csp, provider=gcp, redpanda_id=ck5luh76bagqm4aaf260, service=external-dns" t=2024-05-29T13:44:13.700602268Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700311014Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700276313Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700270513Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-775c769f8b-7wnxp, provider=gcp, redpanda_id=cfictoiei3b2k79h29q0, service=external-dns" t=2024-05-29T13:44:13.700525427Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.700497779Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=540251 slug=axelio + level=debug ts=2024-05-29T13:44:13.700490703Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700214112Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.700411678Z caller=ruler.go:522 msg="tenant is owned by this instance" user=540251 slug=axelio groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.70014921Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.700098409Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699984006Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:13.699967206Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699929405Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.700170969Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699897504Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=274199 slug=telemetriahgm instance= t=2024-05-29T13:44:13.700082557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699884703Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699874403Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699843402Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699824102Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699812702Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=274199 slug=telemetriahgm instance= t=2024-05-29T13:44:13.700066585Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b4db5dbf8-m8d4t, provider=gcp, redpanda_id=cnhshb3qu42smqf5rh6g, service=external-dns" t=2024-05-29T13:44:13.70014914Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.6997551Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.6997343Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b4db5dbf8-m8d4t, provider=gcp, redpanda_id=cnhshb3qu42smqf5rh6g, service=external-dns" t=2024-05-29T13:44:13.700135397Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699674798Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="container=external-dns, endpoint=http, instance=10.0.8.134:7979, job=external-dns, namespace=external-dns, pod=external-dns-7645cfc7c6-lswtp, provider=gcp, redpanda_id=cfigqg7u1f0o5qipe1h0, service=external-dns" t=2024-05-29T13:44:13.700047262Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.8.134:7979, job=external-dns, namespace=external-dns, pod=external-dns-7645cfc7c6-lswtp, provider=gcp, redpanda_id=cfigqg7u1f0o5qipe1h0, service=external-dns" t=2024-05-29T13:44:13.700029133Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.699956095Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.699926769Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699629297Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699603896Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699598496Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699552395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699540395Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699518094Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.699778001Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699493894Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699483693Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699466693Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699460893Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.86:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d78448bcb-qm5k8, provider=aws, redpanda_id=cp10kqbe1d7i2jp1h5tg, service=external-dns" t=2024-05-29T13:44:13.699684973Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699455893Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699439092Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699426792Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.80:7979, job=external-dns, namespace=external-dns, pod=external-dns-7676888856-drndm, provider=aws, redpanda_id=cotpnm234627vtg2l8dg, service=external-dns" t=2024-05-29T13:44:13.699587263Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.699493729Z caller=remote_rule_evaluator.go:193 user=396586 slug=opengov msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.79:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b8ffcbbdd-xcr45, provider=aws, redpanda_id=cmfvb0v2a0vplsk4d3ug, service=external-dns" t=2024-05-29T13:44:13.699505883Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69936309Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69934679Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699306289Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699274288Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.699444297Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699221687Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699173786Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:13.699330234Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699121484Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699100984Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699087884Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699082583Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699064983Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699057483Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699039782Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.699024882Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.46:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f4cb455b4-prnft, provider=aws, redpanda_id=cn9jk0jpimm8ftui7jk0, service=external-dns" t=2024-05-29T13:44:13.699151841Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698998981Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698992381Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69896248Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-5df9d899b-wxt8n, provider=aws, redpanda_id=cns4o9jiuvqvkcfi3ao0, service=external-dns" t=2024-05-29T13:44:13.699008355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698895179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-5df9d899b-wxt8n, provider=aws, redpanda_id=cns4o9jiuvqvkcfi3ao0, service=external-dns" t=2024-05-29T13:44:13.698994795Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=491157 slug=prd01wr instance="datasource_uid=grafanacloud-logs, ref_id=A" 
t=2024-05-29T13:44:13.69895449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698864278Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=499423 slug=rebelssoftware t=2024-05-29T13:44:13.69892855Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.386783ms + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698841577Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.698834963Z caller=client.go:80 msg="creating client for grafana instance" user=496982 addr=dns:///chromabeams-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info ts=2024-05-29T13:44:13.698541883Z caller=remote_alert_sender.go:94 user=550657 slug=garrigues host=garrigues-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.149.90.201:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=faaa7681-6c61-40d0-87ce-5e02e0c4ab58 alerts=1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698830377Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698816777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698811177Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698786776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698776876Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.698839411Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.698767209Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.698800314Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698786331Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.698690166Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=0d09f82af3454723 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.69866288Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.698278951s EvaluationString:}]" duration=42.313703ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.225:7979, job=external-dns, namespace=external-dns, pod=external-dns-5cb9bd488f-cwfpb, provider=aws, redpanda_id=cosmgtq34627vtg2kjkg, service=external-dns" t=2024-05-29T13:44:13.698733565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698657073Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=ca10ceed2bb1845c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.698642208Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.698385916s EvaluationString:}]" duration=11.603014ms + level=debug ts=2024-05-29T13:44:13.698556643Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69853057Z level=debug msg="Setting next state" handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:13.698547261Z caller=client.go:80 msg="creating client for grafana instance" user=540485 addr=dns:///christophbronold-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698519969Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=550657 slug=garrigues t=2024-05-29T13:44:13.698392596Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.151494ms + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.175:7979, job=external-dns, namespace=external-dns, pod=external-dns-7965fb4cb8-ckklf, provider=aws, redpanda_id=coqfsdb7u2hdqvuptn50, service=external-dns" t=2024-05-29T13:44:13.698552945Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.69847336Z caller=ruler.go:522 msg="tenant is owned by this instance" user=627059 slug=bemine groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698458168Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.149:7979, job=external-dns, namespace=external-dns, pod=external-dns-d9f6bccf6-wf9p4, provider=aws, redpanda_id=cov5uc2l9050bvlc7thg, service=external-dns" t=2024-05-29T13:44:13.698478098Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698364766Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio 
instance="container=external-dns, endpoint=http, instance=10.0.6.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-778cc68444-gbhtv, provider=aws, redpanda_id=cnlms5310r099f0a9a90, service=external-dns" t=2024-05-29T13:44:13.698370416Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698318264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.130:7979, job=external-dns, namespace=external-dns, pod=external-dns-6576fcd489-4vfn9, provider=aws, redpanda_id=cpb5inq77h8g0iu4f8t0, service=external-dns" t=2024-05-29T13:44:13.69829432Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698263963Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698240862Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.698272502Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698206162Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698194361Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698186561Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.698124537Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.6.11:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f74948744-x6l5j, provider=aws, redpanda_id=coj72qeolu5sgnu5udqg, service=external-dns" t=2024-05-29T13:44:13.69816915Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698102359Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698091459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=190917 slug=d1cx instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.698096391Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698036857Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.698022157Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 
slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697964156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697955755Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697939155Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697926855Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.56.51:7979, job=external-dns, namespace=external-dns, pod=external-dns-56b7b8f845-w846q, provider=aws, redpanda_id=cm9sg3ajfjl2gs1fobd0, service=external-dns" t=2024-05-29T13:44:13.697947821Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.69777315Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.4.38:7979, job=external-dns, namespace=external-dns, pod=external-dns-9c6d94bb-774xr, provider=azure, redpanda_id=cp5srjuvd5pm5ndt9he0, service=external-dns" t=2024-05-29T13:44:13.697721877Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697635547Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697582546Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697513944Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.4.213:7979, job=external-dns, namespace=external-dns, pod=external-dns-58497c6657-w2fvf, provider=aws, redpanda_id=cobdghtdvhevo5irjsi0, service=external-dns" t=2024-05-29T13:44:13.697459598Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697456443Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697449843Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697268738Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697251538Z level=debug msg="Execution no data state is Normal" 
handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697239237Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697229637Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697223737Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697218737Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697189836Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697152135Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697088434Z level=debug msg="Setting next state" handler=resultNoData + level=warn ts=2024-05-29T13:44:13.697114047Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=534324 slug=borgargrande + level=debug ts=2024-05-29T13:44:13.697073647Z caller=ruler.go:522 msg="tenant is owned by this instance" user=534324 slug=borgargrande groups=0 + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697043933Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-066" t=2024-05-29T13:44:13.696423887Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.24.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cb4b8944-fvvtp, provider=aws, redpanda_id=chiec86nch049g9dbu30, service=external-dns" t=2024-05-29T13:44:13.697059013Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-064" t=2024-05-29T13:44:13.696116701Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697038632Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-063" t=2024-05-29T13:44:13.696004634Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-062" t=2024-05-29T13:44:13.695904671Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-062" t=2024-05-29T13:44:13.695893179Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697027632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-060" t=2024-05-29T13:44:13.695672177Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-059" t=2024-05-29T13:44:13.695555134Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-058" t=2024-05-29T13:44:13.695435909Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-058" t=2024-05-29T13:44:13.695420926Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.697011032Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-057" t=2024-05-29T13:44:13.69525102Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-055" t=2024-05-29T13:44:13.695032645Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696992631Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696986531Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-053" t=2024-05-29T13:44:13.694730345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696825927Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696796126Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696753125Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696723425Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.2.166:7979, job=external-dns, namespace=external-dns, pod=external-dns-c7ddb7c44-zjht9, provider=aws, redpanda_id=cm31i42jfjl2gs1fnejg, service=external-dns" t=2024-05-29T13:44:13.69674613Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.69670274Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696681424Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.2.10:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f9cf9b956-qdcdr, provider=aws, redpanda_id=cn2i65o4qu1uu9j2hck0, service=external-dns" t=2024-05-29T13:44:13.696650895Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.2.10:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f9cf9b956-qdcdr, provider=aws, redpanda_id=cn2i65o4qu1uu9j2hck0, service=external-dns" t=2024-05-29T13:44:13.696636851Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696631922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696626722Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696587921Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696565921Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69654852Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug component=discovery ts=2024-05-29T13:44:13.696469241Z caller=retry.go:58 user=307450 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.11.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-66797fbb67-ltnjx, provider=gcp, redpanda_id=ckru7kd3rmo54fot1oqg, service=external-dns" t=2024-05-29T13:44:13.696442261Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.696287194Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696216912Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.10.91:7979, job=external-dns, namespace=external-dns, pod=external-dns-76648954f6-2c8f9, provider=aws, redpanda_id=co5c83i2q0l3vqpekhvg, service=external-dns" t=2024-05-29T13:44:13.696274365Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696206412Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696062308Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696038407Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696024807Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.696008007Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695975806Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695966506Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695956305Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695936905Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695917704Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695906504Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695886304Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695878603Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.695989346Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695870303Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695842803Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695825202Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695819302Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:13.695774401Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695663998Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695623397Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695610997Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695578196Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.695868913Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.10.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-758bbcf8fc-mzkxw, provider=aws, redpanda_id=clod85hpeif0bijutcdg, service=external-dns" t=2024-05-29T13:44:13.695835535Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695526995Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695509494Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.695798135Z caller=ruler.go:522 msg="tenant is owned by this instance" user=512582 slug=backwire groups=0
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695480194Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.695687934Z caller=client.go:80 msg="creating client for grafana instance" user=749921 addr=dns:///charzy-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695460993Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.695656081Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695425292Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695414292Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695407992Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.69563749Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695382791Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.10.236:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f4d8ffbd5-vtzd8, provider=aws, redpanda_id=cj0n9drube5f1u6qjas0, service=external-dns" t=2024-05-29T13:44:13.695643462Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69535109Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69534589Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69533649Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.69532479Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695316889Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695308489Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:13.695543271Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695301689Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695266088Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695228387Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.69531772Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695223487Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.695376317Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695201187Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.695335042Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695164686Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=504140 slug=chipotlestg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.695158185Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=info component=discovery ts=2024-05-29T13:44:13.695186929Z caller=client.go:80 msg="creating client for grafana instance" user=716349 addr=dns:///chargeamps-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.695024567Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=warn ts=2024-05-29T13:44:13.694956127Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=557239 slug=bodytrak
+level=debug ts=2024-05-29T13:44:13.694890719Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.694774493Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.694740525Z caller=client.go:80 msg="creating client for grafana instance" user=663159 addr=dns:///chandramepani-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.694690824Z caller=ruler.go:522 msg="tenant is owned by this instance" user=506965 slug=bitlane groups=1
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.81:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b8fbc77c5-z7chp, provider=aws, redpanda_id=ci5mn3g7e7rqqatd3j3g, service=external-dns" t=2024-05-29T13:44:13.694655449Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-050" t=2024-05-29T13:44:13.694251389Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-050" t=2024-05-29T13:44:13.694237604Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.81:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b8fbc77c5-z7chp, provider=aws, redpanda_id=ci5mn3g7e7rqqatd3j3g, service=external-dns" t=2024-05-29T13:44:13.694642937Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-049" t=2024-05-29T13:44:13.694119455Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-048" t=2024-05-29T13:44:13.694013124Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-048" t=2024-05-29T13:44:13.693998304Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-047" t=2024-05-29T13:44:13.693838026Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-043" t=2024-05-29T13:44:13.69336849Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.694580858Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-042" t=2024-05-29T13:44:13.693237575Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c97d547-795ft, provider=aws, redpanda_id=cgdvvlq0l3lc23hthq2g, service=external-dns" t=2024-05-29T13:44:13.694551722Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.69450924Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.68:7979, job=external-dns, namespace=external-dns, pod=external-dns-748784fd68-2vd9c, provider=aws, redpanda_id=cgvroei80qtl7p20ouq0, service=external-dns" t=2024-05-29T13:44:13.694367139Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.694245429Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.62:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fdd48bbf9-vzsw8, provider=aws, redpanda_id=cfmvae2ei3b2k79h2d2g, service=external-dns" t=2024-05-29T13:44:13.694243056Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.694007531Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.693889988Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.693853417Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.693792554Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.693567431Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.693435612Z caller=client.go:80 msg="creating client for grafana instance" user=558368 addr=dns:///cedi-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.693101009Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-039" t=2024-05-29T13:44:13.693000044Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.692669702Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.692669757Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.249:7979, job=external-dns, namespace=external-dns, pod=external-dns-864677b9c9-4f5m9, provider=aws, redpanda_id=cev1gbb7m575jtvbg87g, service=external-dns" t=2024-05-29T13:44:13.692682974Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.249:7979, job=external-dns, namespace=external-dns, pod=external-dns-864677b9c9-4f5m9, provider=aws, redpanda_id=cev1gbb7m575jtvbg87g, service=external-dns" t=2024-05-29T13:44:13.692667861Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-036" t=2024-05-29T13:44:13.692634794Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f4d799747-66cm7, provider=aws, redpanda_id=ch158bogvg4l92oem62g, service=external-dns" t=2024-05-29T13:44:13.692573Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:13.692570662Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f4d799747-66cm7, provider=aws, redpanda_id=ch158bogvg4l92oem62g, service=external-dns" t=2024-05-29T13:44:13.692535491Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=116479 slug=tomtomnv t=2024-05-29T13:44:13.692344641Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=40.603768ms
+level=debug ts=2024-05-29T13:44:13.692207401Z caller=ruler.go:522 msg="tenant is owned by this instance" user=737022 slug=axtion groups=0
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-86445d955c-qfrv8, provider=aws, redpanda_id=cmjfgp6i35o3jbomnb30, service=external-dns" t=2024-05-29T13:44:13.692233563Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.6922243Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager.persist user=438855 slug=teckresources t=2024-05-29T13:44:13.692230332Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=656284 slug=cencosudx t=2024-05-29T13:44:13.69220902Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=438855 slug=teckresources instance= t=2024-05-29T13:44:13.692212917Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=438855 slug=teckresources instance= t=2024-05-29T13:44:13.692206731Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-033" t=2024-05-29T13:44:13.692171962Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=438855 slug=teckresources version=5 fingerprint=1771c25f2e13f1b9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.692034045Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.691745113s EvaluationString:}]" duration=64.423659ms
+logger=ngalert.state.manager.persist user=768556 slug=beesafe t=2024-05-29T13:44:13.692016059Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-032" t=2024-05-29T13:44:13.692055145Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.692054289Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.691994772Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=spectrum-cable" t=2024-05-29T13:44:13.691998206Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-778f8ff97f-j472q, provider=aws, redpanda_id=cnj1cmstluj4mbfiael0, service=external-dns" t=2024-05-29T13:44:13.691994199Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-031" t=2024-05-29T13:44:13.691951744Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=768556 slug=beesafe t=2024-05-29T13:44:13.691942367Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-031" t=2024-05-29T13:44:13.691939152Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn" t=2024-05-29T13:44:13.691919746Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.206:7979, job=external-dns, namespace=external-dns, pod=external-dns-66459b8b77-zj9bm, provider=aws, redpanda_id=ckr4fp8fa0gdjkkbeotg, service=external-dns" t=2024-05-29T13:44:13.691919361Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimummobile" t=2024-05-29T13:44:13.691872575Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-030" t=2024-05-29T13:44:13.691823114Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed" t=2024-05-29T13:44:13.691828373Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.691720537Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.691733188Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless" t=2024-05-29T13:44:13.691719757Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=472647 slug=planet t=2024-05-29T13:44:13.69168559Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager.persist user=830785 slug=jelitto t=2024-05-29T13:44:13.691595269Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.495349ms
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid" t=2024-05-29T13:44:13.691674556Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishget" t=2024-05-29T13:44:13.691622804Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-d4c5d6b5f-j4ctl, provider=aws, redpanda_id=co7ep2lmsr7ofq8a4ngg, service=external-dns" t=2024-05-29T13:44:13.691661688Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=472647 slug=planet version=3 fingerprint=fe7700bccfa844ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.691532821Z level=debug msg="Alert rule evaluated" results="[{Instance:metric.name=value.num_undelivered_messages, resource.label.project_id=planet-datapipeline-prod, resource.label.subscription_id=ps-candidate-prod State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:metric.name=value.num_undelivered_messages, resource.label.project_id=planet-datapipeline-prod, resource.label.subscription_id=ps-candidate-prod Value:0xc05717f9a8} C:{Var:C Labels:metric.name=value.num_undelivered_messages, resource.label.project_id=planet-datapipeline-prod, resource.label.subscription_id=ps-candidate-prod Value:0xc05717f9d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.691218822s EvaluationString:[ var='B' labels={metric.name=value.num_undelivered_messages, resource.label.project_id=planet-datapipeline-prod, resource.label.subscription_id=ps-candidate-prod} value=14 ], [ var='C' labels={metric.name=value.num_undelivered_messages, resource.label.project_id=planet-datapipeline-prod, resource.label.subscription_id=ps-candidate-prod} value=0 ]}]" duration=45.681404ms
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.691446427Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle" t=2024-05-29T13:44:13.691463491Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.185:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cfd7f7b5f-w78b9, provider=aws, redpanda_id=ce9nm6b9tos7v1tuco30, service=external-dns" t=2024-05-29T13:44:13.691474551Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assuranthousing" t=2024-05-29T13:44:13.69139182Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.168:7979, job=external-dns, namespace=external-dns, pod=external-dns-77dcc877b4-gkpfk, provider=aws, redpanda_id=cmff6a0ku6erdrg5r2sg, service=external-dns" t=2024-05-29T13:44:13.691308078Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-025" t=2024-05-29T13:44:13.691300996Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.168:7979, job=external-dns, namespace=external-dns, pod=external-dns-77dcc877b4-gkpfk, provider=aws, redpanda_id=cmff6a0ku6erdrg5r2sg, service=external-dns" t=2024-05-29T13:44:13.691291843Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol" t=2024-05-29T13:44:13.691250241Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-024" t=2024-05-29T13:44:13.691194073Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=371756 slug=asapp version=13 fingerprint=e4e8a50970ac9f66 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.690959766Z level=debug msg="Alert rule evaluated" results="[{Instance:company_marker=aizhomesol State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=aizhomesol Value:0xc0127cb888} C:{Var:C Labels:company_marker=aizhomesol Value:0xc0127cb880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.69035261s EvaluationString:[ var='B' labels={company_marker=aizhomesol} value=144.30933296674496 ], [ var='C' labels={company_marker=aizhomesol} value=0 ]} {Instance:company_marker=american-airlines State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=american-airlines Value:0xc0127cb910} C:{Var:C Labels:company_marker=american-airlines Value:0xc0127cb8d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690371269s EvaluationString:[ var='B' labels={company_marker=american-airlines} value=106.775470936573 ], [ var='C' labels={company_marker=american-airlines} value=0 ]} {Instance:company_marker=assuranthousing State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assuranthousing Value:0xc0127cb980} C:{Var:C Labels:company_marker=assuranthousing Value:0xc0127cb988}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690385951s EvaluationString:[ var='B' labels={company_marker=assuranthousing} value=140.68607122534283 ], [ var='C' labels={company_marker=assuranthousing} value=0 ]} {Instance:company_marker=assurantlifestyle State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=assurantlifestyle Value:0xc0127cb9c8} C:{Var:C Labels:company_marker=assurantlifestyle Value:0xc0127cba00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690393894s EvaluationString:[ var='B' labels={company_marker=assurantlifestyle} value=93.083080528587 ], [ var='C' labels={company_marker=assurantlifestyle} value=0 ]} {Instance:company_marker=dish State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dish Value:0xc0127cba50} C:{Var:C Labels:company_marker=dish Value:0xc0127cba70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690400091s EvaluationString:[ var='B' labels={company_marker=dish} value=96.91636797061436 ], [ var='C' labels={company_marker=dish} value=0 ]} {Instance:company_marker=dishget State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishget Value:0xc0127cbaf0} C:{Var:C Labels:company_marker=dishget Value:0xc0127cbac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690406571s EvaluationString:[ var='B' labels={company_marker=dishget} value=81.29124007168586 ], [ var='C' labels={company_marker=dishget} value=0 ]} {Instance:company_marker=dishpostpaid State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishpostpaid Value:0xc0127cbb58} C:{Var:C Labels:company_marker=dishpostpaid Value:0xc0127cbb50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690412448s EvaluationString:[ var='B' labels={company_marker=dishpostpaid} value=130.61524384007976 ], [ var='C' labels={company_marker=dishpostpaid} value=0 ]} {Instance:company_marker=dishwireless State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=dishwireless Value:0xc0127cbbc8} C:{Var:C Labels:company_marker=dishwireless Value:0xc0127cbc30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690419489s EvaluationString:[ var='B' labels={company_marker=dishwireless} value=95.86397042910917 ], [ var='C' labels={company_marker=dishwireless} value=0 ]} {Instance:company_marker=jetblue State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=jetblue Value:0xc0127cbca0} C:{Var:C Labels:company_marker=jetblue Value:0xc0127cbc80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690425495s EvaluationString:[ var='B' labels={company_marker=jetblue} value=100.57388060411934 ], [ var='C' labels={company_marker=jetblue} value=0 ]} {Instance:company_marker=optimumfixed State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimumfixed Value:0xc0127cbd10} C:{Var:C Labels:company_marker=optimumfixed Value:0xc0127cbd18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690431043s EvaluationString:[ var='B' labels={company_marker=optimumfixed} value=115.95616818260312 ], [ var='C' labels={company_marker=optimumfixed} value=0 ]} {Instance:company_marker=optimummobile State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=optimummobile Value:0xc0127cbdb0} C:{Var:C Labels:company_marker=optimummobile Value:0xc0127cbd68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690437238s EvaluationString:[ var='B' labels={company_marker=optimummobile} value=70.25224216916442 ], [ var='C' labels={company_marker=optimummobile} value=0 ]} {Instance:company_marker=rcn State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=rcn Value:0xc0127cbe00} C:{Var:C Labels:company_marker=rcn Value:0xc0127cbe40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690442653s EvaluationString:[ var='B' labels={company_marker=rcn} value=138.63871753931673 ], [ var='C' labels={company_marker=rcn} value=0 ]} {Instance:company_marker=spectrum-cable State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:company_marker=spectrum-cable Value:0xc0127cbea0} C:{Var:C Labels:company_marker=spectrum-cable Value:0xc0127cbea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.690448756s EvaluationString:[ var='B' labels={company_marker=spectrum-cable} value=96.34655731344802 ], [ var='C' labels={company_marker=spectrum-cable} value=0 ]}]" duration=252.634624ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-023" t=2024-05-29T13:44:13.691033335Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-84c7775d7-rzbqn, provider=aws, redpanda_id=cfu9jouqj18g969o2330, service=external-dns" t=2024-05-29T13:44:13.690994358Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.690967592Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.690895388Z caller=ruler.go:522 msg="tenant is owned by this instance" user=622234 slug=authero groups=0
+level=debug ts=2024-05-29T13:44:13.690792808Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.690078681Z caller=remote_instance_store.go:51 user=512398 slug=brightdigital msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f46f59df9-l55lw, provider=aws, redpanda_id=cerigsr7m575jtvbg6t0, service=external-dns" t=2024-05-29T13:44:13.690693158Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-020" t=2024-05-29T13:44:13.690670827Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=855233 slug=sadeno t=2024-05-29T13:44:13.690577333Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.244328ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-018" t=2024-05-29T13:44:13.690460055Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-546687cd79-lbhw2, provider=aws, redpanda_id=cmg2stgku6erdrg5r87g, service=external-dns" t=2024-05-29T13:44:13.690370344Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.690279283Z caller=remote_instance_store.go:51 user=654951 slug=apcontrol msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-017" t=2024-05-29T13:44:13.690238251Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=654951 slug=apcontrol t=2024-05-29T13:44:13.690123106Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-016" t=2024-05-29T13:44:13.690093354Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=318220 slug=deepalert instance= t=2024-05-29T13:44:13.690043815Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=318220 slug=deepalert t=2024-05-29T13:44:13.689986303Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.68988472Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.690023742Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.689926521Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=318220 slug=deepalert version=17 fingerprint=20ddedc980ea8fff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.689851217Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.689524645s EvaluationString:}]" duration=31.663884ms
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.68984609Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-014" t=2024-05-29T13:44:13.689836786Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.689826914Z caller=remote_instance_store.go:51 user=55491 slug=demandbase msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=55491 slug=demandbase t=2024-05-29T13:44:13.689783258Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+level=debug ts=2024-05-29T13:44:13.689794273Z caller=remote_image_capturer.go:54 user=4947 slug=mediamath rule_org_id=1 rule_uid=ddbhspyntvgu8e dashboard=PQySKRKMz panel=32 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=55491 slug=demandbase instance="datasource_uid=000000350, ref_id=B,C" t=2024-05-29T13:44:13.689743898Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-013" t=2024-05-29T13:44:13.689721873Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.689788817Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.689763061Z level=debug msg="Setting next state" handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.689726427Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.689724787Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.133:7979, job=external-dns, namespace=external-dns, pod=external-dns-5fcc65f9bd-877k9, provider=aws, redpanda_id=cnu8pmi83bcutli1gkp0, service=external-dns" t=2024-05-29T13:44:13.689650957Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=538037 slug=drivewealth version=5 fingerprint=cfbb7fd9651fd4ff attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.686403305Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652663236s EvaluationString:}]" duration=56.851718ms
+level=debug ts=2024-05-29T13:44:13.689462375Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=108983 slug=agencyanalytics instance="resource.label.database_id=agency-analytics-1:sql-production-frog-replica-3, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database" t=2024-05-29T13:44:13.689322252Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183214 slug=vectorizedio instance="container=external-dns, endpoint=http, instance=10.0.0.112:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d66d89bf5-c5w4t, provider=aws, redpanda_id=ci10ss68mbdudchrk0ug, service=external-dns" t=2024-05-29T13:44:13.689298718Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.68922134Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.689121336Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-009" t=2024-05-29T13:44:13.689195175Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=108983 slug=agencyanalytics instance="resource.label.database_id=agency-analytics-1:sql-production-frog-replica-3, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database" t=2024-05-29T13:44:13.689128488Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-009" t=2024-05-29T13:44:13.689182332Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.689161077Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:13.689197977Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.689056992Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:13.688999249Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=108983 slug=agencyanalytics instance="resource.label.database_id=agency-analytics-1:sql-production-frog-grafana-replica, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database" t=2024-05-29T13:44:13.689037365Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.688908718Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.688876916Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=65300949e480b9fb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.686953245Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.682417394s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=68.673615ms
+logger=ngalert.state.manager user=108983 slug=agencyanalytics instance="resource.label.database_id=agency-analytics-1:sql-production-frog, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database" t=2024-05-29T13:44:13.688674109Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=384712 slug=nearinc t=2024-05-29T13:44:13.688717231Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.228613ms
+level=debug ts=2024-05-29T13:44:13.688769802Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.688750169Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:13.688746937Z caller=grafana.go:247 user=309009 slug=elestyle msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query= groups=35 alerts=0
+level=debug ts=2024-05-29T13:44:13.688673054Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=436633 slug=swirldslabsproduction t=2024-05-29T13:44:13.688537442Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.959802ms
+logger=ngalert.scheduler user=108983 slug=agencyanalytics version=3 fingerprint=66cf3e737068490d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.68644463Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.database_id=agency-analytics-1:sql-production-frog, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resource.label.database_id=agency-analytics-1:sql-production-frog, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc024e3c6c8} C:{Var:C Labels:resource.label.database_id=agency-analytics-1:sql-production-frog, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc024e3c6d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.649895003s EvaluationString:[ var='B' labels={resource.label.database_id=agency-analytics-1:sql-production-frog, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database} value=0.2833333333333333 ], [ var='C' labels={resource.label.database_id=agency-analytics-1:sql-production-frog, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database} value=0 ]} {Instance:resource.label.database_id=agency-analytics-1:sql-production-frog-grafana-replica, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resource.label.database_id=agency-analytics-1:sql-production-frog-grafana-replica, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc024e3c728} C:{Var:C Labels:resource.label.database_id=agency-analytics-1:sql-production-frog-grafana-replica, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc024e3c758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.649916916s EvaluationString:[ var='B' labels={resource.label.database_id=agency-analytics-1:sql-production-frog-grafana-replica, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database} value=0.13333333333333333 ], [ var='C' labels={resource.label.database_id=agency-analytics-1:sql-production-frog-grafana-replica, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database} value=0 ]} {Instance:resource.label.database_id=agency-analytics-1:sql-production-frog-replica-3, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:resource.label.database_id=agency-analytics-1:sql-production-frog-replica-3, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc024e3c848} C:{Var:C Labels:resource.label.database_id=agency-analytics-1:sql-production-frog-replica-3, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database Value:0xc024e3c868}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.649925177s EvaluationString:[ var='B' labels={resource.label.database_id=agency-analytics-1:sql-production-frog-replica-3, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database} value=0.3 ], [ var='C' labels={resource.label.database_id=agency-analytics-1:sql-production-frog-replica-3, resource.label.project_id=agency-analytics-1, resource.label.region=us-east1, resource.type=cloudsql_database} value=0 ]}]" duration=165.315184ms
+logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.644812438Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.99957ms
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.688614916Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.scheduler user=183214 slug=vectorizedio version=31 fingerprint=167738b0ef2f7a2d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.6261336Z level=debug msg="Alert rule evaluated" results="[{Instance:container=external-dns, endpoint=http, instance=10.0.0.112:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d66d89bf5-c5w4t, provider=aws, redpanda_id=ci10ss68mbdudchrk0ug, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.112:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d66d89bf5-c5w4t, provider=aws, redpanda_id=ci10ss68mbdudchrk0ug, service=external-dns Value:0xc02034ea00} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.112:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d66d89bf5-c5w4t, provider=aws, redpanda_id=ci10ss68mbdudchrk0ug, service=external-dns Value:0xc02034eab8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609597903s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.112:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d66d89bf5-c5w4t, provider=aws, redpanda_id=ci10ss68mbdudchrk0ug, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.112:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d66d89bf5-c5w4t, provider=aws, redpanda_id=ci10ss68mbdudchrk0ug, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.133:7979, job=external-dns, namespace=external-dns, pod=external-dns-5fcc65f9bd-877k9, provider=aws, redpanda_id=cnu8pmi83bcutli1gkp0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.133:7979, job=external-dns, namespace=external-dns, pod=external-dns-5fcc65f9bd-877k9, provider=aws, redpanda_id=cnu8pmi83bcutli1gkp0, service=external-dns Value:0xc02034ec78} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.133:7979, job=external-dns, namespace=external-dns, pod=external-dns-5fcc65f9bd-877k9, provider=aws, redpanda_id=cnu8pmi83bcutli1gkp0, service=external-dns Value:0xc02034ed18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609619223s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.133:7979, job=external-dns, namespace=external-dns, pod=external-dns-5fcc65f9bd-877k9, provider=aws, redpanda_id=cnu8pmi83bcutli1gkp0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.133:7979, job=external-dns, namespace=external-dns, pod=external-dns-5fcc65f9bd-877k9, provider=aws, redpanda_id=cnu8pmi83bcutli1gkp0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-546687cd79-lbhw2, provider=aws, redpanda_id=cmg2stgku6erdrg5r87g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-546687cd79-lbhw2, provider=aws, redpanda_id=cmg2stgku6erdrg5r87g, service=external-dns Value:0xc02034f310} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-546687cd79-lbhw2, provider=aws, redpanda_id=cmg2stgku6erdrg5r87g, service=external-dns Value:0xc02034f1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609628186s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-546687cd79-lbhw2, provider=aws, redpanda_id=cmg2stgku6erdrg5r87g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-546687cd79-lbhw2, provider=aws, redpanda_id=cmg2stgku6erdrg5r87g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-678dc86dd9-ppdg7, provider=aws, redpanda_id=cog6l25pu5ejhljb62s0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-678dc86dd9-ppdg7, provider=aws, redpanda_id=cog6l25pu5ejhljb62s0, service=external-dns Value:0xc02034f5a8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-678dc86dd9-ppdg7, provider=aws, redpanda_id=cog6l25pu5ejhljb62s0, service=external-dns Value:0xc02034f6d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609636468s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-678dc86dd9-ppdg7, provider=aws, redpanda_id=cog6l25pu5ejhljb62s0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-678dc86dd9-ppdg7, provider=aws, redpanda_id=cog6l25pu5ejhljb62s0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f46f59df9-l55lw, provider=aws, redpanda_id=cerigsr7m575jtvbg6t0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f46f59df9-l55lw, provider=aws, redpanda_id=cerigsr7m575jtvbg6t0, service=external-dns Value:0xc02034f8f0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f46f59df9-l55lw, provider=aws, redpanda_id=cerigsr7m575jtvbg6t0, service=external-dns Value:0xc02034f9f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609645068s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f46f59df9-l55lw, provider=aws, redpanda_id=cerigsr7m575jtvbg6t0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.159:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f46f59df9-l55lw, provider=aws, redpanda_id=cerigsr7m575jtvbg6t0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-57f4f5777f-nrfkc, provider=aws, redpanda_id=ch40gk9tkf9be1ehken0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-57f4f5777f-nrfkc, provider=aws, redpanda_id=ch40gk9tkf9be1ehken0, service=external-dns Value:0xc02034fcb0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-57f4f5777f-nrfkc, provider=aws, redpanda_id=ch40gk9tkf9be1ehken0, service=external-dns Value:0xc02034fdd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609654284s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-57f4f5777f-nrfkc, provider=aws, redpanda_id=ch40gk9tkf9be1ehken0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-57f4f5777f-nrfkc, provider=aws, redpanda_id=ch40gk9tkf9be1ehken0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-84c7775d7-rzbqn, provider=aws, redpanda_id=cfu9jouqj18g969o2330, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-84c7775d7-rzbqn, provider=aws, redpanda_id=cfu9jouqj18g969o2330, service=external-dns Value:0xc03447a030} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-84c7775d7-rzbqn, provider=aws, redpanda_id=cfu9jouqj18g969o2330, service=external-dns Value:0xc03447a1c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60966247s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-84c7775d7-rzbqn, provider=aws, redpanda_id=cfu9jouqj18g969o2330, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-84c7775d7-rzbqn, provider=aws, redpanda_id=cfu9jouqj18g969o2330, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.160:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b7f4b9588-fmpj2, provider=aws, redpanda_id=cmp52i3qaegck87okgs0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.160:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b7f4b9588-fmpj2, provider=aws, redpanda_id=cmp52i3qaegck87okgs0, service=external-dns Value:0xc03447a740} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.160:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b7f4b9588-fmpj2, provider=aws, redpanda_id=cmp52i3qaegck87okgs0, service=external-dns Value:0xc03447a8c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609672324s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.160:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b7f4b9588-fmpj2, provider=aws, redpanda_id=cmp52i3qaegck87okgs0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.160:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b7f4b9588-fmpj2, provider=aws, redpanda_id=cmp52i3qaegck87okgs0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-7cc47b58d9-gdqgg, provider=aws, redpanda_id=ck05bju0ur7648lk5e1g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-7cc47b58d9-gdqgg, provider=aws, redpanda_id=ck05bju0ur7648lk5e1g, service=external-dns Value:0xc03447aaa0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-7cc47b58d9-gdqgg, provider=aws, redpanda_id=ck05bju0ur7648lk5e1g, service=external-dns Value:0xc03447ab70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609679862s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-7cc47b58d9-gdqgg, provider=aws, redpanda_id=ck05bju0ur7648lk5e1g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-7cc47b58d9-gdqgg, provider=aws, redpanda_id=ck05bju0ur7648lk5e1g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.168:7979, job=external-dns, namespace=external-dns, pod=external-dns-77dcc877b4-gkpfk, provider=aws, redpanda_id=cmff6a0ku6erdrg5r2sg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.168:7979, job=external-dns, namespace=external-dns, pod=external-dns-77dcc877b4-gkpfk, provider=aws, redpanda_id=cmff6a0ku6erdrg5r2sg, service=external-dns Value:0xc03447ad50} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.168:7979, job=external-dns, namespace=external-dns, pod=external-dns-77dcc877b4-gkpfk, provider=aws, redpanda_id=cmff6a0ku6erdrg5r2sg, service=external-dns Value:0xc03447ae08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609687112s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.168:7979, job=external-dns, namespace=external-dns, pod=external-dns-77dcc877b4-gkpfk, provider=aws, redpanda_id=cmff6a0ku6erdrg5r2sg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.168:7979, job=external-dns, namespace=external-dns, pod=external-dns-77dcc877b4-gkpfk, provider=aws, redpanda_id=cmff6a0ku6erdrg5r2sg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.184:7979, job=external-dns, namespace=external-dns, pod=external-dns-646d878df5-ljtbk, provider=aws, redpanda_id=cf4rhsr6f9p3a0jmteo0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.184:7979, job=external-dns, namespace=external-dns, pod=external-dns-646d878df5-ljtbk, provider=aws, redpanda_id=cf4rhsr6f9p3a0jmteo0, service=external-dns Value:0xc03447b160} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.184:7979, job=external-dns, namespace=external-dns, pod=external-dns-646d878df5-ljtbk, provider=aws, redpanda_id=cf4rhsr6f9p3a0jmteo0, service=external-dns Value:0xc03447b0b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609699428s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.184:7979, job=external-dns, namespace=external-dns, pod=external-dns-646d878df5-ljtbk, provider=aws, redpanda_id=cf4rhsr6f9p3a0jmteo0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.184:7979, job=external-dns, namespace=external-dns, pod=external-dns-646d878df5-ljtbk, provider=aws, redpanda_id=cf4rhsr6f9p3a0jmteo0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.185:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cfd7f7b5f-w78b9, provider=aws, redpanda_id=ce9nm6b9tos7v1tuco30, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.185:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cfd7f7b5f-w78b9, provider=aws, redpanda_id=ce9nm6b9tos7v1tuco30, service=external-dns Value:0xc03447b2f8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.185:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cfd7f7b5f-w78b9, provider=aws, redpanda_id=ce9nm6b9tos7v1tuco30, service=external-dns Value:0xc03447b480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609706568s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.185:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cfd7f7b5f-w78b9, provider=aws, redpanda_id=ce9nm6b9tos7v1tuco30, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.185:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cfd7f7b5f-w78b9, provider=aws, redpanda_id=ce9nm6b9tos7v1tuco30, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.186:7979, job=external-dns, namespace=external-dns, pod=external-dns-ff686c4c-84p2b, provider=aws, redpanda_id=cfh1kpfu1f0o5qipdug0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.186:7979, job=external-dns, namespace=external-dns, pod=external-dns-ff686c4c-84p2b, provider=aws, redpanda_id=cfh1kpfu1f0o5qipdug0, service=external-dns Value:0xc03447b958} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.186:7979, job=external-dns, namespace=external-dns, pod=external-dns-ff686c4c-84p2b, provider=aws, redpanda_id=cfh1kpfu1f0o5qipdug0, service=external-dns Value:0xc03447baf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609714896s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.186:7979, job=external-dns, namespace=external-dns, pod=external-dns-ff686c4c-84p2b, provider=aws, redpanda_id=cfh1kpfu1f0o5qipdug0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.186:7979, job=external-dns, namespace=external-dns, pod=external-dns-ff686c4c-84p2b, provider=aws, redpanda_id=cfh1kpfu1f0o5qipdug0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-d4c5d6b5f-j4ctl, provider=aws, redpanda_id=co7ep2lmsr7ofq8a4ngg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-d4c5d6b5f-j4ctl, provider=aws, redpanda_id=co7ep2lmsr7ofq8a4ngg, service=external-dns Value:0xc03447be50} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-d4c5d6b5f-j4ctl, provider=aws, redpanda_id=co7ep2lmsr7ofq8a4ngg, service=external-dns Value:0xc03447bc60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60972192s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-d4c5d6b5f-j4ctl, provider=aws, redpanda_id=co7ep2lmsr7ofq8a4ngg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-d4c5d6b5f-j4ctl, provider=aws, redpanda_id=co7ep2lmsr7ofq8a4ngg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.195:7979, job=external-dns, namespace=external-dns, pod=external-dns-696bd9bb84-xj8pd, provider=aws, redpanda_id=ckkq5vr3baqqelf7hhfg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.195:7979, job=external-dns, namespace=external-dns, pod=external-dns-696bd9bb84-xj8pd, provider=aws, redpanda_id=ckkq5vr3baqqelf7hhfg, service=external-dns Value:0xc030b02170} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.195:7979, job=external-dns, namespace=external-dns, pod=external-dns-696bd9bb84-xj8pd, provider=aws, redpanda_id=ckkq5vr3baqqelf7hhfg, service=external-dns Value:0xc030b02570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609729127s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.195:7979, job=external-dns, namespace=external-dns, pod=external-dns-696bd9bb84-xj8pd, provider=aws, redpanda_id=ckkq5vr3baqqelf7hhfg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.195:7979, job=external-dns, namespace=external-dns, pod=external-dns-696bd9bb84-xj8pd, provider=aws, redpanda_id=ckkq5vr3baqqelf7hhfg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.199:7979, job=external-dns, namespace=external-dns, pod=external-dns-767ffdb4d4-v547k, provider=aws, redpanda_id=ckd9aqq8790s9pfbjfag, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.199:7979, job=external-dns, namespace=external-dns, pod=external-dns-767ffdb4d4-v547k, provider=aws, redpanda_id=ckd9aqq8790s9pfbjfag, service=external-dns Value:0xc030b02aa8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.199:7979, job=external-dns, namespace=external-dns, pod=external-dns-767ffdb4d4-v547k, provider=aws, redpanda_id=ckd9aqq8790s9pfbjfag, service=external-dns Value:0xc030b02ed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609736708s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.199:7979, job=external-dns, namespace=external-dns, pod=external-dns-767ffdb4d4-v547k, provider=aws, redpanda_id=ckd9aqq8790s9pfbjfag, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.199:7979, job=external-dns, namespace=external-dns, pod=external-dns-767ffdb4d4-v547k, provider=aws, redpanda_id=ckd9aqq8790s9pfbjfag, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.206:7979, job=external-dns, namespace=external-dns, pod=external-dns-66459b8b77-zj9bm, provider=aws, redpanda_id=ckr4fp8fa0gdjkkbeotg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.206:7979, job=external-dns, namespace=external-dns, pod=external-dns-66459b8b77-zj9bm, provider=aws, redpanda_id=ckr4fp8fa0gdjkkbeotg, service=external-dns Value:0xc030b03720} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.206:7979, job=external-dns, namespace=external-dns, pod=external-dns-66459b8b77-zj9bm, provider=aws, redpanda_id=ckr4fp8fa0gdjkkbeotg, service=external-dns Value:0xc030b03c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609744144s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.206:7979, job=external-dns, namespace=external-dns, pod=external-dns-66459b8b77-zj9bm, provider=aws, redpanda_id=ckr4fp8fa0gdjkkbeotg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.206:7979, job=external-dns, namespace=external-dns, pod=external-dns-66459b8b77-zj9bm, provider=aws, redpanda_id=ckr4fp8fa0gdjkkbeotg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.218:7979,
job=external-dns, namespace=external-dns, pod=external-dns-778f8ff97f-j472q, provider=aws, redpanda_id=cnj1cmstluj4mbfiael0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-778f8ff97f-j472q, provider=aws, redpanda_id=cnj1cmstluj4mbfiael0, service=external-dns Value:0xc00ab24110} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-778f8ff97f-j472q, provider=aws, redpanda_id=cnj1cmstluj4mbfiael0, service=external-dns Value:0xc00ab241e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609751162s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-778f8ff97f-j472q, provider=aws, redpanda_id=cnj1cmstluj4mbfiael0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-778f8ff97f-j472q, provider=aws, redpanda_id=cnj1cmstluj4mbfiael0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-779f97d6d8-zsqrn, provider=aws, redpanda_id=cnni68g90kn7sr8bj3i0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-779f97d6d8-zsqrn, provider=aws, redpanda_id=cnni68g90kn7sr8bj3i0, service=external-dns Value:0xc00ab243d0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-779f97d6d8-zsqrn, provider=aws, redpanda_id=cnni68g90kn7sr8bj3i0, service=external-dns Value:0xc00ab24488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609759468s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-779f97d6d8-zsqrn, provider=aws, redpanda_id=cnni68g90kn7sr8bj3i0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-779f97d6d8-zsqrn, provider=aws, redpanda_id=cnni68g90kn7sr8bj3i0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-86445d955c-qfrv8, provider=aws, redpanda_id=cmjfgp6i35o3jbomnb30, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-86445d955c-qfrv8, provider=aws, redpanda_id=cmjfgp6i35o3jbomnb30, service=external-dns Value:0xc00ab24630} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-86445d955c-qfrv8, provider=aws, redpanda_id=cmjfgp6i35o3jbomnb30, service=external-dns Value:0xc00ab24700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60976737s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, 
namespace=external-dns, pod=external-dns-86445d955c-qfrv8, provider=aws, redpanda_id=cmjfgp6i35o3jbomnb30, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.237:7979, job=external-dns, namespace=external-dns, pod=external-dns-86445d955c-qfrv8, provider=aws, redpanda_id=cmjfgp6i35o3jbomnb30, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.238:7979, job=external-dns, namespace=external-dns, pod=external-dns-56b4995bf5-gspts, provider=aws, redpanda_id=cfmhbiiei3b2k79h2cj0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.238:7979, job=external-dns, namespace=external-dns, pod=external-dns-56b4995bf5-gspts, provider=aws, redpanda_id=cfmhbiiei3b2k79h2cj0, service=external-dns Value:0xc00ab24898} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.238:7979, job=external-dns, namespace=external-dns, pod=external-dns-56b4995bf5-gspts, provider=aws, redpanda_id=cfmhbiiei3b2k79h2cj0, service=external-dns Value:0xc00ab24990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609775299s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.238:7979, job=external-dns, namespace=external-dns, pod=external-dns-56b4995bf5-gspts, provider=aws, redpanda_id=cfmhbiiei3b2k79h2cj0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.238:7979, job=external-dns, namespace=external-dns, pod=external-dns-56b4995bf5-gspts, provider=aws, redpanda_id=cfmhbiiei3b2k79h2cj0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f4d799747-66cm7, provider=aws, redpanda_id=ch158bogvg4l92oem62g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f4d799747-66cm7, provider=aws, redpanda_id=ch158bogvg4l92oem62g, service=external-dns Value:0xc00ab24b90} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f4d799747-66cm7, provider=aws, redpanda_id=ch158bogvg4l92oem62g, service=external-dns Value:0xc00ab24d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609782098s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f4d799747-66cm7, provider=aws, redpanda_id=ch158bogvg4l92oem62g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f4d799747-66cm7, provider=aws, redpanda_id=ch158bogvg4l92oem62g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.249:7979, job=external-dns, namespace=external-dns, pod=external-dns-864677b9c9-4f5m9, provider=aws, redpanda_id=cev1gbb7m575jtvbg87g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.249:7979, job=external-dns, namespace=external-dns, pod=external-dns-864677b9c9-4f5m9, provider=aws, redpanda_id=cev1gbb7m575jtvbg87g, service=external-dns 
Value:0xc00ab24e90} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.249:7979, job=external-dns, namespace=external-dns, pod=external-dns-864677b9c9-4f5m9, provider=aws, redpanda_id=cev1gbb7m575jtvbg87g, service=external-dns Value:0xc00ab24f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609790795s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.249:7979, job=external-dns, namespace=external-dns, pod=external-dns-864677b9c9-4f5m9, provider=aws, redpanda_id=cev1gbb7m575jtvbg87g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.249:7979, job=external-dns, namespace=external-dns, pod=external-dns-864677b9c9-4f5m9, provider=aws, redpanda_id=cev1gbb7m575jtvbg87g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.28:7979, job=external-dns, namespace=external-dns, pod=external-dns-6897d9ccff-gjzfd, provider=aws, redpanda_id=ci3fc768mbdudchrk1tg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.28:7979, job=external-dns, namespace=external-dns, pod=external-dns-6897d9ccff-gjzfd, provider=aws, redpanda_id=ci3fc768mbdudchrk1tg, service=external-dns Value:0xc00ab251b8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.28:7979, job=external-dns, namespace=external-dns, pod=external-dns-6897d9ccff-gjzfd, provider=aws, redpanda_id=ci3fc768mbdudchrk1tg, service=external-dns Value:0xc00ab25298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609798127s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.28:7979, job=external-dns, namespace=external-dns, pod=external-dns-6897d9ccff-gjzfd, provider=aws, redpanda_id=ci3fc768mbdudchrk1tg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.28:7979, job=external-dns, namespace=external-dns, pod=external-dns-6897d9ccff-gjzfd, provider=aws, redpanda_id=ci3fc768mbdudchrk1tg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.39:7979, job=external-dns, namespace=external-dns, pod=external-dns-cc885cdb7-nnkq2, provider=aws, redpanda_id=cfe0f6m2gj23h4urbe00, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.39:7979, job=external-dns, namespace=external-dns, pod=external-dns-cc885cdb7-nnkq2, provider=aws, redpanda_id=cfe0f6m2gj23h4urbe00, service=external-dns Value:0xc00ab25430} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.39:7979, job=external-dns, namespace=external-dns, pod=external-dns-cc885cdb7-nnkq2, provider=aws, redpanda_id=cfe0f6m2gj23h4urbe00, service=external-dns Value:0xc00ab25500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609806523s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.39:7979, job=external-dns, namespace=external-dns, pod=external-dns-cc885cdb7-nnkq2, provider=aws, redpanda_id=cfe0f6m2gj23h4urbe00, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.39:7979, job=external-dns, namespace=external-dns, pod=external-dns-cc885cdb7-nnkq2, provider=aws, redpanda_id=cfe0f6m2gj23h4urbe00, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, 
instance=10.0.0.42:7979, job=external-dns, namespace=external-dns, pod=external-dns-68969bb555-554qz, provider=aws, redpanda_id=cfmljn2ei3b2k79h2d0g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.42:7979, job=external-dns, namespace=external-dns, pod=external-dns-68969bb555-554qz, provider=aws, redpanda_id=cfmljn2ei3b2k79h2d0g, service=external-dns Value:0xc00ab25698} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.42:7979, job=external-dns, namespace=external-dns, pod=external-dns-68969bb555-554qz, provider=aws, redpanda_id=cfmljn2ei3b2k79h2d0g, service=external-dns Value:0xc00ab25780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609814305s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.42:7979, job=external-dns, namespace=external-dns, pod=external-dns-68969bb555-554qz, provider=aws, redpanda_id=cfmljn2ei3b2k79h2d0g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.42:7979, job=external-dns, namespace=external-dns, pod=external-dns-68969bb555-554qz, provider=aws, redpanda_id=cfmljn2ei3b2k79h2d0g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.62:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fdd48bbf9-vzsw8, provider=aws, redpanda_id=cfmvae2ei3b2k79h2d2g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.62:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fdd48bbf9-vzsw8, provider=aws, redpanda_id=cfmvae2ei3b2k79h2d2g, service=external-dns Value:0xc00ab25908} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.62:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fdd48bbf9-vzsw8, provider=aws, redpanda_id=cfmvae2ei3b2k79h2d2g, service=external-dns Value:0xc00ab259c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609821945s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.62:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fdd48bbf9-vzsw8, provider=aws, redpanda_id=cfmvae2ei3b2k79h2d2g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.62:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fdd48bbf9-vzsw8, provider=aws, redpanda_id=cfmvae2ei3b2k79h2d2g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.68:7979, job=external-dns, namespace=external-dns, pod=external-dns-748784fd68-2vd9c, provider=aws, redpanda_id=cgvroei80qtl7p20ouq0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.68:7979, job=external-dns, namespace=external-dns, pod=external-dns-748784fd68-2vd9c, provider=aws, redpanda_id=cgvroei80qtl7p20ouq0, service=external-dns Value:0xc00ab25c60} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.68:7979, job=external-dns, namespace=external-dns, pod=external-dns-748784fd68-2vd9c, provider=aws, redpanda_id=cgvroei80qtl7p20ouq0, service=external-dns Value:0xc00ab25ba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609830387s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.68:7979, 
job=external-dns, namespace=external-dns, pod=external-dns-748784fd68-2vd9c, provider=aws, redpanda_id=cgvroei80qtl7p20ouq0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.68:7979, job=external-dns, namespace=external-dns, pod=external-dns-748784fd68-2vd9c, provider=aws, redpanda_id=cgvroei80qtl7p20ouq0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.71:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f8f4f9b57-fg56z, provider=aws, redpanda_id=ck47lbv6bagqm4aaepo0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.71:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f8f4f9b57-fg56z, provider=aws, redpanda_id=ck47lbv6bagqm4aaepo0, service=external-dns Value:0xc00ab25d88} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.71:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f8f4f9b57-fg56z, provider=aws, redpanda_id=ck47lbv6bagqm4aaepo0, service=external-dns Value:0xc00ab25eb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609838773s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.71:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f8f4f9b57-fg56z, provider=aws, redpanda_id=ck47lbv6bagqm4aaepo0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.71:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f8f4f9b57-fg56z, provider=aws, redpanda_id=ck47lbv6bagqm4aaepo0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c97d547-795ft, provider=aws, redpanda_id=cgdvvlq0l3lc23hthq2g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c97d547-795ft, provider=aws, redpanda_id=cgdvvlq0l3lc23hthq2g, service=external-dns Value:0xc00ab25fe8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c97d547-795ft, provider=aws, redpanda_id=cgdvvlq0l3lc23hthq2g, service=external-dns Value:0xc02f51db20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609846331s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c97d547-795ft, provider=aws, redpanda_id=cgdvvlq0l3lc23hthq2g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b4c97d547-795ft, provider=aws, redpanda_id=cgdvvlq0l3lc23hthq2g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.81:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b8fbc77c5-z7chp, provider=aws, redpanda_id=ci5mn3g7e7rqqatd3j3g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.81:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b8fbc77c5-z7chp, provider=aws, redpanda_id=ci5mn3g7e7rqqatd3j3g, service=external-dns 
Value:0xc02afac4c0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.81:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b8fbc77c5-z7chp, provider=aws, redpanda_id=ci5mn3g7e7rqqatd3j3g, service=external-dns Value:0xc02afac810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609853844s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.81:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b8fbc77c5-z7chp, provider=aws, redpanda_id=ci5mn3g7e7rqqatd3j3g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.81:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b8fbc77c5-z7chp, provider=aws, redpanda_id=ci5mn3g7e7rqqatd3j3g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.84:7979, job=external-dns, namespace=external-dns, pod=external-dns-7556858b8c-wgscm, provider=aws, redpanda_id=certnoj7m575jtvbg730, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.84:7979, job=external-dns, namespace=external-dns, pod=external-dns-7556858b8c-wgscm, provider=aws, redpanda_id=certnoj7m575jtvbg730, service=external-dns Value:0xc02afacdc8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.84:7979, job=external-dns, namespace=external-dns, pod=external-dns-7556858b8c-wgscm, provider=aws, redpanda_id=certnoj7m575jtvbg730, service=external-dns Value:0xc02afacfd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609861109s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.84:7979, job=external-dns, namespace=external-dns, pod=external-dns-7556858b8c-wgscm, provider=aws, redpanda_id=certnoj7m575jtvbg730, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.84:7979, job=external-dns, namespace=external-dns, pod=external-dns-7556858b8c-wgscm, provider=aws, redpanda_id=certnoj7m575jtvbg730, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.86:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fcb4c468-rvp5z, provider=aws, redpanda_id=cnors1090kn7sr8bk01g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.86:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fcb4c468-rvp5z, provider=aws, redpanda_id=cnors1090kn7sr8bk01g, service=external-dns Value:0xc02afad7a0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.86:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fcb4c468-rvp5z, provider=aws, redpanda_id=cnors1090kn7sr8bk01g, service=external-dns Value:0xc02afad440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609868638s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.86:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fcb4c468-rvp5z, provider=aws, redpanda_id=cnors1090kn7sr8bk01g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.86:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fcb4c468-rvp5z, provider=aws, redpanda_id=cnors1090kn7sr8bk01g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, 
instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b9ccd7499-8lttn, provider=aws, redpanda_id=cop3g6cjjifkpmg8t4gg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b9ccd7499-8lttn, provider=aws, redpanda_id=cop3g6cjjifkpmg8t4gg, service=external-dns Value:0xc02afadc28} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b9ccd7499-8lttn, provider=aws, redpanda_id=cop3g6cjjifkpmg8t4gg, service=external-dns Value:0xc030ea0020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60987538s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b9ccd7499-8lttn, provider=aws, redpanda_id=cop3g6cjjifkpmg8t4gg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b9ccd7499-8lttn, provider=aws, redpanda_id=cop3g6cjjifkpmg8t4gg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5ffcd74d67-fpfwd, provider=aws, redpanda_id=cgvrdiggvg4l92oem4ig, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5ffcd74d67-fpfwd, provider=aws, redpanda_id=cgvrdiggvg4l92oem4ig, service=external-dns Value:0xc030ea0348} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5ffcd74d67-fpfwd, provider=aws, redpanda_id=cgvrdiggvg4l92oem4ig, service=external-dns Value:0xc030ea0220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609882705s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5ffcd74d67-fpfwd, provider=aws, redpanda_id=cgvrdiggvg4l92oem4ig, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.0.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-5ffcd74d67-fpfwd, provider=aws, redpanda_id=cgvrdiggvg4l92oem4ig, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-694ccd5ff5-bczrs, provider=aws, redpanda_id=ciqr9acfbvm2cii265dg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-694ccd5ff5-bczrs, provider=aws, redpanda_id=ciqr9acfbvm2cii265dg, service=external-dns Value:0xc030ea0578} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-694ccd5ff5-bczrs, provider=aws, redpanda_id=ciqr9acfbvm2cii265dg, service=external-dns Value:0xc030ea07a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609890268s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.124:7979, 
job=external-dns, namespace=external-dns, pod=external-dns-694ccd5ff5-bczrs, provider=aws, redpanda_id=ciqr9acfbvm2cii265dg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-694ccd5ff5-bczrs, provider=aws, redpanda_id=ciqr9acfbvm2cii265dg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.144:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fc54b747c-2wwr2, provider=aws, redpanda_id=cnkvb0b10r099f0a90b0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.144:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fc54b747c-2wwr2, provider=aws, redpanda_id=cnkvb0b10r099f0a90b0, service=external-dns Value:0xc030ea0ab0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.144:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fc54b747c-2wwr2, provider=aws, redpanda_id=cnkvb0b10r099f0a90b0, service=external-dns Value:0xc030ea0b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609900262s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.144:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fc54b747c-2wwr2, provider=aws, redpanda_id=cnkvb0b10r099f0a90b0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.144:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fc54b747c-2wwr2, provider=aws, redpanda_id=cnkvb0b10r099f0a90b0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b99c556d4-mhrw4, provider=aws, redpanda_id=cp2uqj62qi9l644oahg0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b99c556d4-mhrw4, provider=aws, redpanda_id=cp2uqj62qi9l644oahg0, service=external-dns Value:0xc030ea0ec0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b99c556d4-mhrw4, provider=aws, redpanda_id=cp2uqj62qi9l644oahg0, service=external-dns Value:0xc030ea0dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60990871s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b99c556d4-mhrw4, provider=aws, redpanda_id=cp2uqj62qi9l644oahg0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.192:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b99c556d4-mhrw4, provider=aws, redpanda_id=cp2uqj62qi9l644oahg0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.236:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f4d8ffbd5-vtzd8, provider=aws, redpanda_id=cj0n9drube5f1u6qjas0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.236:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f4d8ffbd5-vtzd8, provider=aws, redpanda_id=cj0n9drube5f1u6qjas0, 
service=external-dns Value:0xc030ea10d8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.236:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f4d8ffbd5-vtzd8, provider=aws, redpanda_id=cj0n9drube5f1u6qjas0, service=external-dns Value:0xc030ea12c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609916466s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.236:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f4d8ffbd5-vtzd8, provider=aws, redpanda_id=cj0n9drube5f1u6qjas0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.236:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f4d8ffbd5-vtzd8, provider=aws, redpanda_id=cj0n9drube5f1u6qjas0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-9cb7d76bd-lt9qb, provider=gcp, redpanda_id=ck5k6gn6bagqm4aaf1qg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-9cb7d76bd-lt9qb, provider=gcp, redpanda_id=ck5k6gn6bagqm4aaf1qg, service=external-dns Value:0xc030ea1708} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-9cb7d76bd-lt9qb, provider=gcp, redpanda_id=ck5k6gn6bagqm4aaf1qg, service=external-dns Value:0xc030ea1808}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609924296s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-9cb7d76bd-lt9qb, provider=gcp, redpanda_id=ck5k6gn6bagqm4aaf1qg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.35:7979, job=external-dns, namespace=external-dns, pod=external-dns-9cb7d76bd-lt9qb, provider=gcp, redpanda_id=ck5k6gn6bagqm4aaf1qg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-758bbcf8fc-mzkxw, provider=aws, redpanda_id=clod85hpeif0bijutcdg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-758bbcf8fc-mzkxw, provider=aws, redpanda_id=clod85hpeif0bijutcdg, service=external-dns Value:0xc030ea1c48} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-758bbcf8fc-mzkxw, provider=aws, redpanda_id=clod85hpeif0bijutcdg, service=external-dns Value:0xc030ea1b10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609931485s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-758bbcf8fc-mzkxw, provider=aws, redpanda_id=clod85hpeif0bijutcdg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.74:7979, job=external-dns, namespace=external-dns, pod=external-dns-758bbcf8fc-mzkxw, provider=aws, redpanda_id=clod85hpeif0bijutcdg, service=external-dns} value=0 ]} 
{Instance:container=external-dns, endpoint=http, instance=10.0.10.76:7979, job=external-dns, namespace=external-dns, pod=external-dns-647f5b9f6-xcpcv, provider=aws, redpanda_id=cmna2c854vo7dt9o0hmg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.76:7979, job=external-dns, namespace=external-dns, pod=external-dns-647f5b9f6-xcpcv, provider=aws, redpanda_id=cmna2c854vo7dt9o0hmg, service=external-dns Value:0xc030ea1e48} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.76:7979, job=external-dns, namespace=external-dns, pod=external-dns-647f5b9f6-xcpcv, provider=aws, redpanda_id=cmna2c854vo7dt9o0hmg, service=external-dns Value:0xc030ea1f70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609938955s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.76:7979, job=external-dns, namespace=external-dns, pod=external-dns-647f5b9f6-xcpcv, provider=aws, redpanda_id=cmna2c854vo7dt9o0hmg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.76:7979, job=external-dns, namespace=external-dns, pod=external-dns-647f5b9f6-xcpcv, provider=aws, redpanda_id=cmna2c854vo7dt9o0hmg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.77:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b7989cf6c-dzscj, provider=aws, redpanda_id=clnoi0lhac3fc8rlkcrg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.77:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b7989cf6c-dzscj, provider=aws, redpanda_id=clnoi0lhac3fc8rlkcrg, service=external-dns Value:0xc005b6c600} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.77:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b7989cf6c-dzscj, provider=aws, redpanda_id=clnoi0lhac3fc8rlkcrg, service=external-dns Value:0xc005b6c420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609946648s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.10.77:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b7989cf6c-dzscj, provider=aws, redpanda_id=clnoi0lhac3fc8rlkcrg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.77:7979, job=external-dns, namespace=external-dns, pod=external-dns-7b7989cf6c-dzscj, provider=aws, redpanda_id=clnoi0lhac3fc8rlkcrg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.10.91:7979, job=external-dns, namespace=external-dns, pod=external-dns-76648954f6-2c8f9, provider=aws, redpanda_id=co5c83i2q0l3vqpekhvg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.10.91:7979, job=external-dns, namespace=external-dns, pod=external-dns-76648954f6-2c8f9, provider=aws, redpanda_id=co5c83i2q0l3vqpekhvg, service=external-dns Value:0xc005b6c8b8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.10.91:7979, job=external-dns, namespace=external-dns, pod=external-dns-76648954f6-2c8f9, provider=aws, redpanda_id=co5c83i2q0l3vqpekhvg, service=external-dns Value:0xc005b6c9d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609958899s EvaluationString:[ var='B' 
labels={container=external-dns, endpoint=http, instance=10.0.10.91:7979, job=external-dns, namespace=external-dns, pod=external-dns-76648954f6-2c8f9, provider=aws, redpanda_id=co5c83i2q0l3vqpekhvg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.10.91:7979, job=external-dns, namespace=external-dns, pod=external-dns-76648954f6-2c8f9, provider=aws, redpanda_id=co5c83i2q0l3vqpekhvg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.11.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-66797fbb67-ltnjx, provider=gcp, redpanda_id=ckru7kd3rmo54fot1oqg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.11.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-66797fbb67-ltnjx, provider=gcp, redpanda_id=ckru7kd3rmo54fot1oqg, service=external-dns Value:0xc005b6cc90} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.11.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-66797fbb67-ltnjx, provider=gcp, redpanda_id=ckru7kd3rmo54fot1oqg, service=external-dns Value:0xc005b6cba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609967834s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.11.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-66797fbb67-ltnjx, provider=gcp, redpanda_id=ckru7kd3rmo54fot1oqg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.11.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-66797fbb67-ltnjx, provider=gcp, redpanda_id=ckru7kd3rmo54fot1oqg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.12.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-659886f8b6-wpj2q, provider=gcp, redpanda_id=cgugikm984cnrsjmp5ng, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.12.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-659886f8b6-wpj2q, provider=gcp, redpanda_id=cgugikm984cnrsjmp5ng, service=external-dns Value:0xc005b6ce70} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.12.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-659886f8b6-wpj2q, provider=gcp, redpanda_id=cgugikm984cnrsjmp5ng, service=external-dns Value:0xc005b6cf78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60997559s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.12.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-659886f8b6-wpj2q, provider=gcp, redpanda_id=cgugikm984cnrsjmp5ng, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.12.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-659886f8b6-wpj2q, provider=gcp, redpanda_id=cgugikm984cnrsjmp5ng, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.2.10:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f9cf9b956-qdcdr, provider=aws, redpanda_id=cn2i65o4qu1uu9j2hck0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.2.10:7979, job=external-dns, namespace=external-dns, 
pod=external-dns-7f9cf9b956-qdcdr, provider=aws, redpanda_id=cn2i65o4qu1uu9j2hck0, service=external-dns Value:0xc005b6d150} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.2.10:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f9cf9b956-qdcdr, provider=aws, redpanda_id=cn2i65o4qu1uu9j2hck0, service=external-dns Value:0xc005b6d220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609982415s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.2.10:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f9cf9b956-qdcdr, provider=aws, redpanda_id=cn2i65o4qu1uu9j2hck0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.2.10:7979, job=external-dns, namespace=external-dns, pod=external-dns-7f9cf9b956-qdcdr, provider=aws, redpanda_id=cn2i65o4qu1uu9j2hck0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.2.166:7979, job=external-dns, namespace=external-dns, pod=external-dns-c7ddb7c44-zjht9, provider=aws, redpanda_id=cm31i42jfjl2gs1fnejg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.2.166:7979, job=external-dns, namespace=external-dns, pod=external-dns-c7ddb7c44-zjht9, provider=aws, redpanda_id=cm31i42jfjl2gs1fnejg, service=external-dns Value:0xc005b6d568} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.2.166:7979, job=external-dns, namespace=external-dns, pod=external-dns-c7ddb7c44-zjht9, provider=aws, redpanda_id=cm31i42jfjl2gs1fnejg, service=external-dns Value:0xc005b6d6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609989393s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.2.166:7979, job=external-dns, namespace=external-dns, pod=external-dns-c7ddb7c44-zjht9, provider=aws, redpanda_id=cm31i42jfjl2gs1fnejg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.2.166:7979, job=external-dns, namespace=external-dns, pod=external-dns-c7ddb7c44-zjht9, provider=aws, redpanda_id=cm31i42jfjl2gs1fnejg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.2.31:7979, job=external-dns, namespace=external-dns, pod=external-dns-b46b5ddf9-kjwr9, provider=aws, redpanda_id=clod121peif0bijutc6g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.2.31:7979, job=external-dns, namespace=external-dns, pod=external-dns-b46b5ddf9-kjwr9, provider=aws, redpanda_id=clod121peif0bijutc6g, service=external-dns Value:0xc005b6dc90} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.2.31:7979, job=external-dns, namespace=external-dns, pod=external-dns-b46b5ddf9-kjwr9, provider=aws, redpanda_id=clod121peif0bijutc6g, service=external-dns Value:0xc005b6dab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.609996787s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.2.31:7979, job=external-dns, namespace=external-dns, pod=external-dns-b46b5ddf9-kjwr9, provider=aws, redpanda_id=clod121peif0bijutc6g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.2.31:7979, job=external-dns, namespace=external-dns, pod=external-dns-b46b5ddf9-kjwr9, provider=aws, 
redpanda_id=clod121peif0bijutc6g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.2.61:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d5458ff96-mv5l7, provider=aws, redpanda_id=cpbi7j301cjus260uhbg, service=external-dns State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.2.61:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d5458ff96-mv5l7, provider=aws, redpanda_id=cpbi7j301cjus260uhbg, service=external-dns Value:0xc005b6dfc8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.2.61:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d5458ff96-mv5l7, provider=aws, redpanda_id=cpbi7j301cjus260uhbg, service=external-dns Value:0xc005b6de80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610003453s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.2.61:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d5458ff96-mv5l7, provider=aws, redpanda_id=cpbi7j301cjus260uhbg, service=external-dns} value=0.007979666666666666 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.2.61:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d5458ff96-mv5l7, provider=aws, redpanda_id=cpbi7j301cjus260uhbg, service=external-dns} value=1 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.24.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cb4b8944-fvvtp, provider=aws, redpanda_id=chiec86nch049g9dbu30, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.24.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cb4b8944-fvvtp, provider=aws, redpanda_id=chiec86nch049g9dbu30, service=external-dns Value:0xc023eda980} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.24.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cb4b8944-fvvtp, provider=aws, redpanda_id=chiec86nch049g9dbu30, service=external-dns Value:0xc023edacc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61001448s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.24.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cb4b8944-fvvtp, provider=aws, redpanda_id=chiec86nch049g9dbu30, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.24.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cb4b8944-fvvtp, provider=aws, redpanda_id=chiec86nch049g9dbu30, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.32.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5778dd9c54-22mzb, provider=aws, redpanda_id=cjcu7eckblpdubl9a2l0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.32.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5778dd9c54-22mzb, provider=aws, redpanda_id=cjcu7eckblpdubl9a2l0, service=external-dns Value:0xc023edb2a8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.32.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5778dd9c54-22mzb, provider=aws, redpanda_id=cjcu7eckblpdubl9a2l0, service=external-dns Value:0xc023edb420}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:3.610022732s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.32.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5778dd9c54-22mzb, provider=aws, redpanda_id=cjcu7eckblpdubl9a2l0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.32.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5778dd9c54-22mzb, provider=aws, redpanda_id=cjcu7eckblpdubl9a2l0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.4.125:7979, job=external-dns, namespace=external-dns, pod=external-dns-64c7fd5487-dp5hz, provider=aws, redpanda_id=cn39hdrpimm8ftui5tjg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.4.125:7979, job=external-dns, namespace=external-dns, pod=external-dns-64c7fd5487-dp5hz, provider=aws, redpanda_id=cn39hdrpimm8ftui5tjg, service=external-dns Value:0xc023edb690} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.4.125:7979, job=external-dns, namespace=external-dns, pod=external-dns-64c7fd5487-dp5hz, provider=aws, redpanda_id=cn39hdrpimm8ftui5tjg, service=external-dns Value:0xc023edb8e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610032646s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.4.125:7979, job=external-dns, namespace=external-dns, pod=external-dns-64c7fd5487-dp5hz, provider=aws, redpanda_id=cn39hdrpimm8ftui5tjg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.4.125:7979, job=external-dns, namespace=external-dns, pod=external-dns-64c7fd5487-dp5hz, provider=aws, redpanda_id=cn39hdrpimm8ftui5tjg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.4.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-58df7688f9-rpcql, provider=aws, redpanda_id=ch40r2lioepec6ilb000, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.4.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-58df7688f9-rpcql, provider=aws, redpanda_id=ch40r2lioepec6ilb000, service=external-dns Value:0xc023edbf48} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.4.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-58df7688f9-rpcql, provider=aws, redpanda_id=ch40r2lioepec6ilb000, service=external-dns Value:0xc023edbd10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610040289s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.4.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-58df7688f9-rpcql, provider=aws, redpanda_id=ch40r2lioepec6ilb000, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.4.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-58df7688f9-rpcql, provider=aws, redpanda_id=ch40r2lioepec6ilb000, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.4.213:7979, job=external-dns, namespace=external-dns, pod=external-dns-58497c6657-w2fvf, provider=aws, redpanda_id=cobdghtdvhevo5irjsi0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, 
instance=10.0.4.213:7979, job=external-dns, namespace=external-dns, pod=external-dns-58497c6657-w2fvf, provider=aws, redpanda_id=cobdghtdvhevo5irjsi0, service=external-dns Value:0xc024ea42d0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.4.213:7979, job=external-dns, namespace=external-dns, pod=external-dns-58497c6657-w2fvf, provider=aws, redpanda_id=cobdghtdvhevo5irjsi0, service=external-dns Value:0xc024ea4490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610047553s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.4.213:7979, job=external-dns, namespace=external-dns, pod=external-dns-58497c6657-w2fvf, provider=aws, redpanda_id=cobdghtdvhevo5irjsi0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.4.213:7979, job=external-dns, namespace=external-dns, pod=external-dns-58497c6657-w2fvf, provider=aws, redpanda_id=cobdghtdvhevo5irjsi0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.4.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-55fd9d9888-rp84w, provider=aws, redpanda_id=cpbi78i77h8g0iu4fgq0, service=external-dns State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.4.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-55fd9d9888-rp84w, provider=aws, redpanda_id=cpbi78i77h8g0iu4fgq0, service=external-dns Value:0xc024ea48c8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.4.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-55fd9d9888-rp84w, provider=aws, redpanda_id=cpbi78i77h8g0iu4fgq0, service=external-dns Value:0xc024ea4b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610054447s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.4.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-55fd9d9888-rp84w, provider=aws, redpanda_id=cpbi78i77h8g0iu4fgq0, service=external-dns} value=0.005028944444444444 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.4.218:7979, job=external-dns, namespace=external-dns, pod=external-dns-55fd9d9888-rp84w, provider=aws, redpanda_id=cpbi78i77h8g0iu4fgq0, service=external-dns} value=1 ]} {Instance:container=external-dns, endpoint=http, instance=10.0.4.37:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc899f5c-6ln69, provider=aws, redpanda_id=cjnft31ita076k50b2k0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.4.37:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc899f5c-6ln69, provider=aws, redpanda_id=cjnft31ita076k50b2k0, service=external-dns Value:0xc024ea4ea8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.4.37:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc899f5c-6ln69, provider=aws, redpanda_id=cjnft31ita076k50b2k0, service=external-dns Value:0xc024ea5160}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610064512s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.4.37:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc899f5c-6ln69, provider=aws, redpanda_id=cjnft31ita076k50b2k0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.4.37:7979, 
[... hundreds of near-identical per-pod evaluation records elided for readability: each external-dns instance (providers aws, azure, gcp) reports State:Normal with var='B' value=0 and var='C' value=0, all EvaluatedAt:2024-05-29 13:44:10 +0000 UTC with EvaluationDuration ~3.61s. The one exception in this span is the following Alerting record, preserved verbatim: ...]
{Instance:container=external-dns, endpoint=http, instance=10.0.8.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-7db7744998-rb2c2, provider=gcp, redpanda_id=cpbi9cq77h8g0iu4fgu0, service=external-dns State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.0.8.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-7db7744998-rb2c2, provider=gcp, redpanda_id=cpbi9cq77h8g0iu4fgu0, service=external-dns Value:0xc01ff29860} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.0.8.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-7db7744998-rb2c2, provider=gcp, redpanda_id=cpbi9cq77h8g0iu4fgu0, service=external-dns Value:0xc01ff29950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610437694s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.0.8.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-7db7744998-rb2c2, provider=gcp, redpanda_id=cpbi9cq77h8g0iu4fgu0, service=external-dns} value=0.005771222222222223 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.0.8.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-7db7744998-rb2c2, provider=gcp, redpanda_id=cpbi9cq77h8g0iu4fgu0, service=external-dns} value=1 ]}
[... remaining State:Normal records (all value=0) continue below unchanged ...]
State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.105.0.204:7979, job=external-dns, namespace=external-dns, pod=external-dns-85b5667dc9-jps7c, provider=aws, redpanda_id=cm5qpt2jfjl2gs1fnno0, service=external-dns Value:0xc027b11cc0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.105.0.204:7979, job=external-dns, namespace=external-dns, pod=external-dns-85b5667dc9-jps7c, provider=aws, redpanda_id=cm5qpt2jfjl2gs1fnno0, service=external-dns Value:0xc027b11d68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610548579s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.105.0.204:7979, job=external-dns, namespace=external-dns, pod=external-dns-85b5667dc9-jps7c, provider=aws, redpanda_id=cm5qpt2jfjl2gs1fnno0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.105.0.204:7979, job=external-dns, namespace=external-dns, pod=external-dns-85b5667dc9-jps7c, provider=aws, redpanda_id=cm5qpt2jfjl2gs1fnno0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.105.161.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-648c87b5b9-fzl82, provider=gcp, redpanda_id=cn1vdqo4qu1uu9j2h870, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.105.161.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-648c87b5b9-fzl82, provider=gcp, redpanda_id=cn1vdqo4qu1uu9j2h870, service=external-dns Value:0xc027b11eb0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.105.161.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-648c87b5b9-fzl82, provider=gcp, redpanda_id=cn1vdqo4qu1uu9j2h870, service=external-dns Value:0xc027b11f50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610556033s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.105.161.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-648c87b5b9-fzl82, provider=gcp, redpanda_id=cn1vdqo4qu1uu9j2h870, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.105.161.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-648c87b5b9-fzl82, provider=gcp, redpanda_id=cn1vdqo4qu1uu9j2h870, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.108.80.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-55ccd8f5d5-5t9s9, provider=gcp, redpanda_id=cp736qcgu42nj1hpqak0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.108.80.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-55ccd8f5d5-5t9s9, provider=gcp, redpanda_id=cp736qcgu42nj1hpqak0, service=external-dns Value:0xc0326ac1c0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.108.80.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-55ccd8f5d5-5t9s9, provider=gcp, redpanda_id=cp736qcgu42nj1hpqak0, service=external-dns Value:0xc0326ac340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610564529s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.108.80.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-55ccd8f5d5-5t9s9, provider=gcp, redpanda_id=cp736qcgu42nj1hpqak0, 
service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.108.80.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-55ccd8f5d5-5t9s9, provider=gcp, redpanda_id=cp736qcgu42nj1hpqak0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.11.4.69:7979, job=external-dns, namespace=external-dns, pod=external-dns-678ffc6db4-jcdhz, provider=aws, redpanda_id=cjmftk9sutm9q4956l60, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.11.4.69:7979, job=external-dns, namespace=external-dns, pod=external-dns-678ffc6db4-jcdhz, provider=aws, redpanda_id=cjmftk9sutm9q4956l60, service=external-dns Value:0xc0326ac6f0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.11.4.69:7979, job=external-dns, namespace=external-dns, pod=external-dns-678ffc6db4-jcdhz, provider=aws, redpanda_id=cjmftk9sutm9q4956l60, service=external-dns Value:0xc0326ac818}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61057244s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.11.4.69:7979, job=external-dns, namespace=external-dns, pod=external-dns-678ffc6db4-jcdhz, provider=aws, redpanda_id=cjmftk9sutm9q4956l60, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.11.4.69:7979, job=external-dns, namespace=external-dns, pod=external-dns-678ffc6db4-jcdhz, provider=aws, redpanda_id=cjmftk9sutm9q4956l60, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.117.208.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fd6f64d9-lcwc4, provider=gcp, redpanda_id=cm0jdhqjfjl2gs1fmrj0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.117.208.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fd6f64d9-lcwc4, provider=gcp, redpanda_id=cm0jdhqjfjl2gs1fmrj0, service=external-dns Value:0xc0326aca30} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.117.208.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fd6f64d9-lcwc4, provider=gcp, redpanda_id=cm0jdhqjfjl2gs1fmrj0, service=external-dns Value:0xc0326acb60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610579769s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.117.208.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fd6f64d9-lcwc4, provider=gcp, redpanda_id=cm0jdhqjfjl2gs1fmrj0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.117.208.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-68fd6f64d9-lcwc4, provider=gcp, redpanda_id=cm0jdhqjfjl2gs1fmrj0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.12.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-958c7c58d-hk4pd, provider=aws, redpanda_id=cm1cdsqjfjl2gs1fn260, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.12.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-958c7c58d-hk4pd, provider=aws, redpanda_id=cm1cdsqjfjl2gs1fn260, service=external-dns Value:0xc0326acde8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.12.0.245:7979, 
job=external-dns, namespace=external-dns, pod=external-dns-958c7c58d-hk4pd, provider=aws, redpanda_id=cm1cdsqjfjl2gs1fn260, service=external-dns Value:0xc0326acf20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610588566s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.12.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-958c7c58d-hk4pd, provider=aws, redpanda_id=cm1cdsqjfjl2gs1fn260, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.12.0.245:7979, job=external-dns, namespace=external-dns, pod=external-dns-958c7c58d-hk4pd, provider=aws, redpanda_id=cm1cdsqjfjl2gs1fn260, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.12.8.254:7979, job=external-dns, namespace=external-dns, pod=external-dns-5d8b6f88c7-jm8xp, provider=aws, redpanda_id=ckmp70dnk2eo2pkvhgqg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.12.8.254:7979, job=external-dns, namespace=external-dns, pod=external-dns-5d8b6f88c7-jm8xp, provider=aws, redpanda_id=ckmp70dnk2eo2pkvhgqg, service=external-dns Value:0xc0326ad190} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.12.8.254:7979, job=external-dns, namespace=external-dns, pod=external-dns-5d8b6f88c7-jm8xp, provider=aws, redpanda_id=ckmp70dnk2eo2pkvhgqg, service=external-dns Value:0xc0326ad320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610597317s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.12.8.254:7979, job=external-dns, namespace=external-dns, pod=external-dns-5d8b6f88c7-jm8xp, provider=aws, redpanda_id=ckmp70dnk2eo2pkvhgqg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.12.8.254:7979, job=external-dns, namespace=external-dns, pod=external-dns-5d8b6f88c7-jm8xp, provider=aws, redpanda_id=ckmp70dnk2eo2pkvhgqg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.129.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7c49cfb999-zfd5w, provider=gcp, redpanda_id=cnah9bt9cnna1iqe201g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.129.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7c49cfb999-zfd5w, provider=gcp, redpanda_id=cnah9bt9cnna1iqe201g, service=external-dns Value:0xc0326ad5a0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.129.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7c49cfb999-zfd5w, provider=gcp, redpanda_id=cnah9bt9cnna1iqe201g, service=external-dns Value:0xc0326ad708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610605013s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.129.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7c49cfb999-zfd5w, provider=gcp, redpanda_id=cnah9bt9cnna1iqe201g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.129.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7c49cfb999-zfd5w, provider=gcp, redpanda_id=cnah9bt9cnna1iqe201g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.13.0.6:7979, job=external-dns, namespace=external-dns, 
pod=external-dns-856b465cdf-4t2pm, provider=aws, redpanda_id=cp35ago256rbcqi0bh40, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.13.0.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-856b465cdf-4t2pm, provider=aws, redpanda_id=cp35ago256rbcqi0bh40, service=external-dns Value:0xc0326ad970} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.13.0.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-856b465cdf-4t2pm, provider=aws, redpanda_id=cp35ago256rbcqi0bh40, service=external-dns Value:0xc0326ada98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610612586s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.13.0.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-856b465cdf-4t2pm, provider=aws, redpanda_id=cp35ago256rbcqi0bh40, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.13.0.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-856b465cdf-4t2pm, provider=aws, redpanda_id=cp35ago256rbcqi0bh40, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.14.0.247:7979, job=external-dns, namespace=external-dns, pod=external-dns-b8858c8b5-p7xxg, provider=aws, redpanda_id=cmaappijfjl2gs1foebg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.14.0.247:7979, job=external-dns, namespace=external-dns, pod=external-dns-b8858c8b5-p7xxg, provider=aws, redpanda_id=cmaappijfjl2gs1foebg, service=external-dns Value:0xc0326aded0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.14.0.247:7979, job=external-dns, namespace=external-dns, pod=external-dns-b8858c8b5-p7xxg, provider=aws, redpanda_id=cmaappijfjl2gs1foebg, service=external-dns Value:0xc0326add80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610622433s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.14.0.247:7979, job=external-dns, namespace=external-dns, pod=external-dns-b8858c8b5-p7xxg, provider=aws, redpanda_id=cmaappijfjl2gs1foebg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.14.0.247:7979, job=external-dns, namespace=external-dns, pod=external-dns-b8858c8b5-p7xxg, provider=aws, redpanda_id=cmaappijfjl2gs1foebg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.14.6.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d97c56c56-vtdqp, provider=aws, redpanda_id=cm6nu6qjfjl2gs1fntf0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.14.6.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d97c56c56-vtdqp, provider=aws, redpanda_id=cm6nu6qjfjl2gs1fntf0, service=external-dns Value:0xc004ff26e0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.14.6.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d97c56c56-vtdqp, provider=aws, redpanda_id=cm6nu6qjfjl2gs1fntf0, service=external-dns Value:0xc004ff2828}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610632306s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.14.6.23:7979, job=external-dns, namespace=external-dns, 
pod=external-dns-7d97c56c56-vtdqp, provider=aws, redpanda_id=cm6nu6qjfjl2gs1fntf0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.14.6.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d97c56c56-vtdqp, provider=aws, redpanda_id=cm6nu6qjfjl2gs1fntf0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.16.8.3:7979, job=external-dns, namespace=external-dns, pod=external-dns-7489b55764-htmph, provider=gcp, redpanda_id=cg7li0b7415mavl54ibg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.16.8.3:7979, job=external-dns, namespace=external-dns, pod=external-dns-7489b55764-htmph, provider=gcp, redpanda_id=cg7li0b7415mavl54ibg, service=external-dns Value:0xc004ff2c68} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.16.8.3:7979, job=external-dns, namespace=external-dns, pod=external-dns-7489b55764-htmph, provider=gcp, redpanda_id=cg7li0b7415mavl54ibg, service=external-dns Value:0xc004ff2ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610640128s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.16.8.3:7979, job=external-dns, namespace=external-dns, pod=external-dns-7489b55764-htmph, provider=gcp, redpanda_id=cg7li0b7415mavl54ibg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.16.8.3:7979, job=external-dns, namespace=external-dns, pod=external-dns-7489b55764-htmph, provider=gcp, redpanda_id=cg7li0b7415mavl54ibg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.16.9.8:7979, job=external-dns, namespace=external-dns, pod=external-dns-f7dd68954-kxs2f, provider=gcp, redpanda_id=cjaiiekkblpdubl99pvg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.16.9.8:7979, job=external-dns, namespace=external-dns, pod=external-dns-f7dd68954-kxs2f, provider=gcp, redpanda_id=cjaiiekkblpdubl99pvg, service=external-dns Value:0xc004ff34f0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.16.9.8:7979, job=external-dns, namespace=external-dns, pod=external-dns-f7dd68954-kxs2f, provider=gcp, redpanda_id=cjaiiekkblpdubl99pvg, service=external-dns Value:0xc004ff3680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610655991s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.16.9.8:7979, job=external-dns, namespace=external-dns, pod=external-dns-f7dd68954-kxs2f, provider=gcp, redpanda_id=cjaiiekkblpdubl99pvg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.16.9.8:7979, job=external-dns, namespace=external-dns, pod=external-dns-f7dd68954-kxs2f, provider=gcp, redpanda_id=cjaiiekkblpdubl99pvg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.166.8.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-7fcb5db6cc-5nczl, provider=gcp, redpanda_id=ckogp15nk2eo2pkviq0g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.166.8.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-7fcb5db6cc-5nczl, provider=gcp, redpanda_id=ckogp15nk2eo2pkviq0g, service=external-dns Value:0xc004ff3c50} C:{Var:C 
Labels:container=external-dns, endpoint=http, instance=10.166.8.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-7fcb5db6cc-5nczl, provider=gcp, redpanda_id=ckogp15nk2eo2pkviq0g, service=external-dns Value:0xc035924508}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610663243s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.166.8.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-7fcb5db6cc-5nczl, provider=gcp, redpanda_id=ckogp15nk2eo2pkviq0g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.166.8.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-7fcb5db6cc-5nczl, provider=gcp, redpanda_id=ckogp15nk2eo2pkviq0g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.167.12.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-68cb6448f6-t8jjv, provider=gcp, redpanda_id=chiba8rosj7gvl84bk3g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.167.12.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-68cb6448f6-t8jjv, provider=gcp, redpanda_id=chiba8rosj7gvl84bk3g, service=external-dns Value:0xc035924d68} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.167.12.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-68cb6448f6-t8jjv, provider=gcp, redpanda_id=chiba8rosj7gvl84bk3g, service=external-dns Value:0xc035924a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610671521s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.167.12.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-68cb6448f6-t8jjv, provider=gcp, redpanda_id=chiba8rosj7gvl84bk3g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.167.12.140:7979, job=external-dns, namespace=external-dns, pod=external-dns-68cb6448f6-t8jjv, provider=gcp, redpanda_id=chiba8rosj7gvl84bk3g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.167.9.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-d6b47bc46-cz9wh, provider=gcp, redpanda_id=chllqaunch049g9dc0t0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.167.9.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-d6b47bc46-cz9wh, provider=gcp, redpanda_id=chllqaunch049g9dc0t0, service=external-dns Value:0xc035925470} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.167.9.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-d6b47bc46-cz9wh, provider=gcp, redpanda_id=chllqaunch049g9dc0t0, service=external-dns Value:0xc0359252f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610679285s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.167.9.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-d6b47bc46-cz9wh, provider=gcp, redpanda_id=chllqaunch049g9dc0t0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.167.9.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-d6b47bc46-cz9wh, provider=gcp, redpanda_id=chllqaunch049g9dc0t0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, 
instance=10.178.0.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bdcd878b5-vj6fj, provider=gcp, redpanda_id=cmjv9aei35o3jbomnhe0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.178.0.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bdcd878b5-vj6fj, provider=gcp, redpanda_id=cmjv9aei35o3jbomnhe0, service=external-dns Value:0xc035925ab0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.178.0.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bdcd878b5-vj6fj, provider=gcp, redpanda_id=cmjv9aei35o3jbomnhe0, service=external-dns Value:0xc035925d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610688596s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.178.0.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bdcd878b5-vj6fj, provider=gcp, redpanda_id=cmjv9aei35o3jbomnhe0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.178.0.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bdcd878b5-vj6fj, provider=gcp, redpanda_id=cmjv9aei35o3jbomnhe0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.18.8.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-d54d6f669-hmwcq, provider=gcp, redpanda_id=cmshck3mgrq4nh9abbmg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.18.8.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-d54d6f669-hmwcq, provider=gcp, redpanda_id=cmshck3mgrq4nh9abbmg, service=external-dns Value:0xc02f95c060} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.18.8.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-d54d6f669-hmwcq, provider=gcp, redpanda_id=cmshck3mgrq4nh9abbmg, service=external-dns Value:0xc02f95c118}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610696122s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.18.8.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-d54d6f669-hmwcq, provider=gcp, redpanda_id=cmshck3mgrq4nh9abbmg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.18.8.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-d54d6f669-hmwcq, provider=gcp, redpanda_id=cmshck3mgrq4nh9abbmg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.188.8.136:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f98df96-nd4rr, provider=gcp, redpanda_id=cj0ght3ube5f1u6qj810, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.188.8.136:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f98df96-nd4rr, provider=gcp, redpanda_id=cj0ght3ube5f1u6qj810, service=external-dns Value:0xc02f95c2e0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.188.8.136:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f98df96-nd4rr, provider=gcp, redpanda_id=cj0ght3ube5f1u6qj810, service=external-dns Value:0xc02f95c3a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610704393s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, 
instance=10.188.8.136:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f98df96-nd4rr, provider=gcp, redpanda_id=cj0ght3ube5f1u6qj810, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.188.8.136:7979, job=external-dns, namespace=external-dns, pod=external-dns-5f98df96-nd4rr, provider=gcp, redpanda_id=cj0ght3ube5f1u6qj810, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.189.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-676cc7dff-b22xs, provider=gcp, redpanda_id=cke58qaau9ni9ian29l0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.189.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-676cc7dff-b22xs, provider=gcp, redpanda_id=cke58qaau9ni9ian29l0, service=external-dns Value:0xc02f95c550} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.189.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-676cc7dff-b22xs, provider=gcp, redpanda_id=cke58qaau9ni9ian29l0, service=external-dns Value:0xc02f95c620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610712315s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.189.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-676cc7dff-b22xs, provider=gcp, redpanda_id=cke58qaau9ni9ian29l0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.189.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-676cc7dff-b22xs, provider=gcp, redpanda_id=cke58qaau9ni9ian29l0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.19.6.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-665495f65c-nwdt4, provider=aws, redpanda_id=cov1p22l9050bvlc7smg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.19.6.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-665495f65c-nwdt4, provider=aws, redpanda_id=cov1p22l9050bvlc7smg, service=external-dns Value:0xc02f95c890} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.19.6.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-665495f65c-nwdt4, provider=aws, redpanda_id=cov1p22l9050bvlc7smg, service=external-dns Value:0xc02f95c7a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610720494s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.19.6.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-665495f65c-nwdt4, provider=aws, redpanda_id=cov1p22l9050bvlc7smg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.19.6.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-665495f65c-nwdt4, provider=aws, redpanda_id=cov1p22l9050bvlc7smg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.19.8.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-854cf8bb87-x7l46, provider=gcp, redpanda_id=cpbid5a77h8g0iu4fh0g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.19.8.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-854cf8bb87-x7l46, provider=gcp, 
redpanda_id=cpbid5a77h8g0iu4fh0g, service=external-dns Value:0xc02f95ca58} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.19.8.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-854cf8bb87-x7l46, provider=gcp, redpanda_id=cpbid5a77h8g0iu4fh0g, service=external-dns Value:0xc02f95cb50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610727904s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.19.8.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-854cf8bb87-x7l46, provider=gcp, redpanda_id=cpbid5a77h8g0iu4fh0g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.19.8.163:7979, job=external-dns, namespace=external-dns, pod=external-dns-854cf8bb87-x7l46, provider=gcp, redpanda_id=cpbid5a77h8g0iu4fh0g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.199.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-69b9b588bc-cq22w, provider=gcp, redpanda_id=cnduflca4ff1g49heso0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.199.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-69b9b588bc-cq22w, provider=gcp, redpanda_id=cnduflca4ff1g49heso0, service=external-dns Value:0xc02f95cd18} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.199.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-69b9b588bc-cq22w, provider=gcp, redpanda_id=cnduflca4ff1g49heso0, service=external-dns Value:0xc02f95ce60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610736814s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.199.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-69b9b588bc-cq22w, provider=gcp, redpanda_id=cnduflca4ff1g49heso0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.199.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-69b9b588bc-cq22w, provider=gcp, redpanda_id=cnduflca4ff1g49heso0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.2.0.49:7979, job=external-dns, namespace=external-dns, pod=external-dns-7bc678f875-dv7r4, provider=aws, redpanda_id=cihcamco6oo60d8405d0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.2.0.49:7979, job=external-dns, namespace=external-dns, pod=external-dns-7bc678f875-dv7r4, provider=aws, redpanda_id=cihcamco6oo60d8405d0, service=external-dns Value:0xc02f95d010} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.2.0.49:7979, job=external-dns, namespace=external-dns, pod=external-dns-7bc678f875-dv7r4, provider=aws, redpanda_id=cihcamco6oo60d8405d0, service=external-dns Value:0xc02f95d0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610745211s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.2.0.49:7979, job=external-dns, namespace=external-dns, pod=external-dns-7bc678f875-dv7r4, provider=aws, redpanda_id=cihcamco6oo60d8405d0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.2.0.49:7979, job=external-dns, namespace=external-dns, pod=external-dns-7bc678f875-dv7r4, provider=aws, redpanda_id=cihcamco6oo60d8405d0, 
service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.2.4.179:7979, job=external-dns, namespace=external-dns, pod=external-dns-5dd474d59f-55d29, provider=aws, redpanda_id=cih9b8so6oo60d8402g0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.2.4.179:7979, job=external-dns, namespace=external-dns, pod=external-dns-5dd474d59f-55d29, provider=aws, redpanda_id=cih9b8so6oo60d8402g0, service=external-dns Value:0xc02f95d2a8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.2.4.179:7979, job=external-dns, namespace=external-dns, pod=external-dns-5dd474d59f-55d29, provider=aws, redpanda_id=cih9b8so6oo60d8402g0, service=external-dns Value:0xc02f95d3e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610752958s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.2.4.179:7979, job=external-dns, namespace=external-dns, pod=external-dns-5dd474d59f-55d29, provider=aws, redpanda_id=cih9b8so6oo60d8402g0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.2.4.179:7979, job=external-dns, namespace=external-dns, pod=external-dns-5dd474d59f-55d29, provider=aws, redpanda_id=cih9b8so6oo60d8402g0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.2.8.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-6595d44b88-gb2h4, provider=aws, redpanda_id=ckor43qoad62ru3usgg0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.2.8.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-6595d44b88-gb2h4, provider=aws, redpanda_id=ckor43qoad62ru3usgg0, service=external-dns Value:0xc02f95d5f8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.2.8.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-6595d44b88-gb2h4, provider=aws, redpanda_id=ckor43qoad62ru3usgg0, service=external-dns Value:0xc02f95d6b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610760336s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.2.8.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-6595d44b88-gb2h4, provider=aws, redpanda_id=ckor43qoad62ru3usgg0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.2.8.23:7979, job=external-dns, namespace=external-dns, pod=external-dns-6595d44b88-gb2h4, provider=aws, redpanda_id=ckor43qoad62ru3usgg0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.20.240.157:7979, job=external-dns, namespace=external-dns, pod=external-dns-dddd4478b-b8xvs, provider=aws, redpanda_id=cfddoje2gj23h4urbbg0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.20.240.157:7979, job=external-dns, namespace=external-dns, pod=external-dns-dddd4478b-b8xvs, provider=aws, redpanda_id=cfddoje2gj23h4urbbg0, service=external-dns Value:0xc02f95d830} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.20.240.157:7979, job=external-dns, namespace=external-dns, pod=external-dns-dddd4478b-b8xvs, provider=aws, redpanda_id=cfddoje2gj23h4urbbg0, service=external-dns Value:0xc02f95d900}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610767662s 
EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.20.240.157:7979, job=external-dns, namespace=external-dns, pod=external-dns-dddd4478b-b8xvs, provider=aws, redpanda_id=cfddoje2gj23h4urbbg0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.20.240.157:7979, job=external-dns, namespace=external-dns, pod=external-dns-dddd4478b-b8xvs, provider=aws, redpanda_id=cfddoje2gj23h4urbbg0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.200.0.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-55994cc7cd-x67tg, provider=aws, redpanda_id=cfhc16nu1f0o5qipdvp0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.200.0.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-55994cc7cd-x67tg, provider=aws, redpanda_id=cfhc16nu1f0o5qipdvp0, service=external-dns Value:0xc02f95da60} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.200.0.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-55994cc7cd-x67tg, provider=aws, redpanda_id=cfhc16nu1f0o5qipdvp0, service=external-dns Value:0xc02f95db30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610775071s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.200.0.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-55994cc7cd-x67tg, provider=aws, redpanda_id=cfhc16nu1f0o5qipdvp0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.200.0.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-55994cc7cd-x67tg, provider=aws, redpanda_id=cfhc16nu1f0o5qipdvp0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.200.16.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-8db5477b5-tvdr6, provider=aws, redpanda_id=cobc1l90cr7v0ro17hng, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.200.16.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-8db5477b5-tvdr6, provider=aws, redpanda_id=cobc1l90cr7v0ro17hng, service=external-dns Value:0xc02f95dc98} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.200.16.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-8db5477b5-tvdr6, provider=aws, redpanda_id=cobc1l90cr7v0ro17hng, service=external-dns Value:0xc02f95dd68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610784905s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.200.16.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-8db5477b5-tvdr6, provider=aws, redpanda_id=cobc1l90cr7v0ro17hng, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.200.16.15:7979, job=external-dns, namespace=external-dns, pod=external-dns-8db5477b5-tvdr6, provider=aws, redpanda_id=cobc1l90cr7v0ro17hng, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.21.240.44:7979, job=external-dns, namespace=external-dns, pod=external-dns-598985fc64-dnv9d, provider=aws, redpanda_id=cfvrtf1ksfn7un5jhlm0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.21.240.44:7979, job=external-dns, 
namespace=external-dns, pod=external-dns-598985fc64-dnv9d, provider=aws, redpanda_id=cfvrtf1ksfn7un5jhlm0, service=external-dns Value:0xc02fbce030} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.21.240.44:7979, job=external-dns, namespace=external-dns, pod=external-dns-598985fc64-dnv9d, provider=aws, redpanda_id=cfvrtf1ksfn7un5jhlm0, service=external-dns Value:0xc02fbce0f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610793035s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.21.240.44:7979, job=external-dns, namespace=external-dns, pod=external-dns-598985fc64-dnv9d, provider=aws, redpanda_id=cfvrtf1ksfn7un5jhlm0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.21.240.44:7979, job=external-dns, namespace=external-dns, pod=external-dns-598985fc64-dnv9d, provider=aws, redpanda_id=cfvrtf1ksfn7un5jhlm0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.220.8.156:7979, job=external-dns, namespace=external-dns, pod=external-dns-55c44464f9-hw8ch, provider=gcp, redpanda_id=colb09f6nielhbep3rv0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.220.8.156:7979, job=external-dns, namespace=external-dns, pod=external-dns-55c44464f9-hw8ch, provider=gcp, redpanda_id=colb09f6nielhbep3rv0, service=external-dns Value:0xc02fbce4d0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.220.8.156:7979, job=external-dns, namespace=external-dns, pod=external-dns-55c44464f9-hw8ch, provider=gcp, redpanda_id=colb09f6nielhbep3rv0, service=external-dns Value:0xc02fbce280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610801049s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.220.8.156:7979, job=external-dns, namespace=external-dns, pod=external-dns-55c44464f9-hw8ch, provider=gcp, redpanda_id=colb09f6nielhbep3rv0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.220.8.156:7979, job=external-dns, namespace=external-dns, pod=external-dns-55c44464f9-hw8ch, provider=gcp, redpanda_id=colb09f6nielhbep3rv0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.220.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-969fb454f-tz8hk, provider=gcp, redpanda_id=cokiaqnb4g10p5c9b8k0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.220.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-969fb454f-tz8hk, provider=gcp, redpanda_id=cokiaqnb4g10p5c9b8k0, service=external-dns Value:0xc02fbce7c0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.220.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-969fb454f-tz8hk, provider=gcp, redpanda_id=cokiaqnb4g10p5c9b8k0, service=external-dns Value:0xc02fbce910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610809238s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.220.8.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-969fb454f-tz8hk, provider=gcp, redpanda_id=cokiaqnb4g10p5c9b8k0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.220.8.26:7979, job=external-dns, namespace=external-dns, 
pod=external-dns-969fb454f-tz8hk, provider=gcp, redpanda_id=cokiaqnb4g10p5c9b8k0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.221.8.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-84b5c4658c-8nb67, provider=gcp, redpanda_id=co62vfa6i63ttaf6mrl0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.221.8.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-84b5c4658c-8nb67, provider=gcp, redpanda_id=co62vfa6i63ttaf6mrl0, service=external-dns Value:0xc02fbcecd0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.221.8.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-84b5c4658c-8nb67, provider=gcp, redpanda_id=co62vfa6i63ttaf6mrl0, service=external-dns Value:0xc02fbceb70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610817477s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.221.8.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-84b5c4658c-8nb67, provider=gcp, redpanda_id=co62vfa6i63ttaf6mrl0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.221.8.154:7979, job=external-dns, namespace=external-dns, pod=external-dns-84b5c4658c-8nb67, provider=gcp, redpanda_id=co62vfa6i63ttaf6mrl0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.221.8.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d8d8bcfff-cgzrt, provider=gcp, redpanda_id=colasun6nielhbep3rsg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.221.8.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d8d8bcfff-cgzrt, provider=gcp, redpanda_id=colasun6nielhbep3rsg, service=external-dns Value:0xc02fbcef80} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.221.8.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d8d8bcfff-cgzrt, provider=gcp, redpanda_id=colasun6nielhbep3rsg, service=external-dns Value:0xc02fbcf038}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610826607s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.221.8.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d8d8bcfff-cgzrt, provider=gcp, redpanda_id=colasun6nielhbep3rsg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.221.8.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d8d8bcfff-cgzrt, provider=gcp, redpanda_id=colasun6nielhbep3rsg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.221.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-d99f74777-hvbkx, provider=gcp, redpanda_id=coklds7b4g10p5c9bbd0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.221.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-d99f74777-hvbkx, provider=gcp, redpanda_id=coklds7b4g10p5c9bbd0, service=external-dns Value:0xc02fbcf1f0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.221.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-d99f74777-hvbkx, provider=gcp, redpanda_id=coklds7b4g10p5c9bbd0, service=external-dns 
Value:0xc02fbcf2a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610834416s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.221.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-d99f74777-hvbkx, provider=gcp, redpanda_id=coklds7b4g10p5c9bbd0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.221.8.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-d99f74777-hvbkx, provider=gcp, redpanda_id=coklds7b4g10p5c9bbd0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.230.128.190:7979, job=external-dns, namespace=external-dns, pod=external-dns-cf7878f95-pfm6g, provider=aws, redpanda_id=co64tki6i63ttaf6mv7g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.230.128.190:7979, job=external-dns, namespace=external-dns, pod=external-dns-cf7878f95-pfm6g, provider=aws, redpanda_id=co64tki6i63ttaf6mv7g, service=external-dns Value:0xc02fbcf3d8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.230.128.190:7979, job=external-dns, namespace=external-dns, pod=external-dns-cf7878f95-pfm6g, provider=aws, redpanda_id=co64tki6i63ttaf6mv7g, service=external-dns Value:0xc02fbcf550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610842322s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.230.128.190:7979, job=external-dns, namespace=external-dns, pod=external-dns-cf7878f95-pfm6g, provider=aws, redpanda_id=co64tki6i63ttaf6mv7g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.230.128.190:7979, job=external-dns, namespace=external-dns, pod=external-dns-cf7878f95-pfm6g, provider=aws, redpanda_id=co64tki6i63ttaf6mv7g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.24.240.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b4769bbf7-9x7g6, provider=aws, redpanda_id=cks1cct3rmo54fot1t6g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.24.240.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b4769bbf7-9x7g6, provider=aws, redpanda_id=cks1cct3rmo54fot1t6g, service=external-dns Value:0xc02fbcf870} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.24.240.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b4769bbf7-9x7g6, provider=aws, redpanda_id=cks1cct3rmo54fot1t6g, service=external-dns Value:0xc02fbcf928}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610861628s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.24.240.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b4769bbf7-9x7g6, provider=aws, redpanda_id=cks1cct3rmo54fot1t6g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.24.240.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b4769bbf7-9x7g6, provider=aws, redpanda_id=cks1cct3rmo54fot1t6g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.25.240.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-867bd4b86d-z42vv, provider=aws, redpanda_id=cgqr4fohsdffe3jba4sg, service=external-dns State:Normal Error: Results:map[] 
Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.25.240.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-867bd4b86d-z42vv, provider=aws, redpanda_id=cgqr4fohsdffe3jba4sg, service=external-dns Value:0xc02fbcfb20} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.25.240.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-867bd4b86d-z42vv, provider=aws, redpanda_id=cgqr4fohsdffe3jba4sg, service=external-dns Value:0xc02fbcfbc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610869401s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.25.240.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-867bd4b86d-z42vv, provider=aws, redpanda_id=cgqr4fohsdffe3jba4sg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.25.240.90:7979, job=external-dns, namespace=external-dns, pod=external-dns-867bd4b86d-z42vv, provider=aws, redpanda_id=cgqr4fohsdffe3jba4sg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.253.65.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-b4db9dd4f-vbwrk, provider=gcp, redpanda_id=cmk3k6vj7la47b8ftnbg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.253.65.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-b4db9dd4f-vbwrk, provider=gcp, redpanda_id=cmk3k6vj7la47b8ftnbg, service=external-dns Value:0xc02fbcfeb0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.253.65.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-b4db9dd4f-vbwrk, provider=gcp, redpanda_id=cmk3k6vj7la47b8ftnbg, service=external-dns Value:0xc02fbcffa8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61087787s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.253.65.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-b4db9dd4f-vbwrk, provider=gcp, redpanda_id=cmk3k6vj7la47b8ftnbg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.253.65.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-b4db9dd4f-vbwrk, provider=gcp, redpanda_id=cmk3k6vj7la47b8ftnbg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.254.21.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-59856f6b6-6wbkr, provider=aws, serverless_id=pro-us-east-1, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.254.21.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-59856f6b6-6wbkr, provider=aws, serverless_id=pro-us-east-1, service=external-dns Value:0xc01f2de2b8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.254.21.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-59856f6b6-6wbkr, provider=aws, serverless_id=pro-us-east-1, service=external-dns Value:0xc01f2de960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610885827s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.254.21.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-59856f6b6-6wbkr, provider=aws, serverless_id=pro-us-east-1, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, 
endpoint=http, instance=10.254.21.158:7979, job=external-dns, namespace=external-dns, pod=external-dns-59856f6b6-6wbkr, provider=aws, serverless_id=pro-us-east-1, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.254.5.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-6559f485bb-94pmf, provider=aws, serverless_id=pro-eu-central-1, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.254.5.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-6559f485bb-94pmf, provider=aws, serverless_id=pro-eu-central-1, service=external-dns Value:0xc01f2ded80} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.254.5.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-6559f485bb-94pmf, provider=aws, serverless_id=pro-eu-central-1, service=external-dns Value:0xc01f2def38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610893366s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.254.5.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-6559f485bb-94pmf, provider=aws, serverless_id=pro-eu-central-1, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.254.5.45:7979, job=external-dns, namespace=external-dns, pod=external-dns-6559f485bb-94pmf, provider=aws, serverless_id=pro-eu-central-1, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.255.0.101:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d65d9764-xf4vp, provider=aws, redpanda_id=cl0ceg1k7kve5j708470, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.255.0.101:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d65d9764-xf4vp, provider=aws, redpanda_id=cl0ceg1k7kve5j708470, service=external-dns Value:0xc01f2df1e8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.255.0.101:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d65d9764-xf4vp, provider=aws, redpanda_id=cl0ceg1k7kve5j708470, service=external-dns Value:0xc01f2df340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61090197s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.255.0.101:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d65d9764-xf4vp, provider=aws, redpanda_id=cl0ceg1k7kve5j708470, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.255.0.101:7979, job=external-dns, namespace=external-dns, pod=external-dns-6d65d9764-xf4vp, provider=aws, redpanda_id=cl0ceg1k7kve5j708470, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.255.0.127:7979, job=external-dns, namespace=external-dns, pod=external-dns-6444fc7b96-kphpj, provider=aws, redpanda_id=cl0e1e9k7kve5j7085hg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.255.0.127:7979, job=external-dns, namespace=external-dns, pod=external-dns-6444fc7b96-kphpj, provider=aws, redpanda_id=cl0e1e9k7kve5j7085hg, service=external-dns Value:0xc01f2df620} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.255.0.127:7979, job=external-dns, namespace=external-dns, pod=external-dns-6444fc7b96-kphpj, provider=aws, 
redpanda_id=cl0e1e9k7kve5j7085hg, service=external-dns Value:0xc01f2df750}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610910338s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.255.0.127:7979, job=external-dns, namespace=external-dns, pod=external-dns-6444fc7b96-kphpj, provider=aws, redpanda_id=cl0e1e9k7kve5j7085hg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.255.0.127:7979, job=external-dns, namespace=external-dns, pod=external-dns-6444fc7b96-kphpj, provider=aws, redpanda_id=cl0e1e9k7kve5j7085hg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.255.0.241:7979, job=external-dns, namespace=external-dns, pod=external-dns-847c5f468d-fsqxh, provider=aws, redpanda_id=cl0eao9k7kve5j7085rg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.255.0.241:7979, job=external-dns, namespace=external-dns, pod=external-dns-847c5f468d-fsqxh, provider=aws, redpanda_id=cl0eao9k7kve5j7085rg, service=external-dns Value:0xc01f2dfa28} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.255.0.241:7979, job=external-dns, namespace=external-dns, pod=external-dns-847c5f468d-fsqxh, provider=aws, redpanda_id=cl0eao9k7kve5j7085rg, service=external-dns Value:0xc01f2dfb68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610918063s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.255.0.241:7979, job=external-dns, namespace=external-dns, pod=external-dns-847c5f468d-fsqxh, provider=aws, redpanda_id=cl0eao9k7kve5j7085rg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.255.0.241:7979, job=external-dns, namespace=external-dns, pod=external-dns-847c5f468d-fsqxh, provider=aws, redpanda_id=cl0eao9k7kve5j7085rg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.255.10.85:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b8cf8ddb5-v2nxp, provider=aws, redpanda_id=cjlalu22i0nlq1f2nu3g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.255.10.85:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b8cf8ddb5-v2nxp, provider=aws, redpanda_id=cjlalu22i0nlq1f2nu3g, service=external-dns Value:0xc00ac989e8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.255.10.85:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b8cf8ddb5-v2nxp, provider=aws, redpanda_id=cjlalu22i0nlq1f2nu3g, service=external-dns Value:0xc00ac98800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610926383s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.255.10.85:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b8cf8ddb5-v2nxp, provider=aws, redpanda_id=cjlalu22i0nlq1f2nu3g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.255.10.85:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b8cf8ddb5-v2nxp, provider=aws, redpanda_id=cjlalu22i0nlq1f2nu3g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.3.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-544c9b44bf-bpzpk, provider=aws, redpanda_id=cj5uvc2r6237gqb2l4b0, 
service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.3.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-544c9b44bf-bpzpk, provider=aws, redpanda_id=cj5uvc2r6237gqb2l4b0, service=external-dns Value:0xc00ac98e38} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.3.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-544c9b44bf-bpzpk, provider=aws, redpanda_id=cj5uvc2r6237gqb2l4b0, service=external-dns Value:0xc00ac98c90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610935291s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.3.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-544c9b44bf-bpzpk, provider=aws, redpanda_id=cj5uvc2r6237gqb2l4b0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.3.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-544c9b44bf-bpzpk, provider=aws, redpanda_id=cj5uvc2r6237gqb2l4b0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.3.6.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5968d7b884-vdpg4, provider=aws, redpanda_id=cno31s890kn7sr8bjhi0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.3.6.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5968d7b884-vdpg4, provider=aws, redpanda_id=cno31s890kn7sr8bjhi0, service=external-dns Value:0xc00ac990f8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.3.6.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5968d7b884-vdpg4, provider=aws, redpanda_id=cno31s890kn7sr8bjhi0, service=external-dns Value:0xc00ac992d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610942684s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.3.6.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5968d7b884-vdpg4, provider=aws, redpanda_id=cno31s890kn7sr8bjhi0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.3.6.214:7979, job=external-dns, namespace=external-dns, pod=external-dns-5968d7b884-vdpg4, provider=aws, redpanda_id=cno31s890kn7sr8bjhi0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.3.6.21:7979, job=external-dns, namespace=external-dns, pod=external-dns-797b959c6-8lvvq, provider=aws, redpanda_id=cno33lg90kn7sr8bjhs0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.3.6.21:7979, job=external-dns, namespace=external-dns, pod=external-dns-797b959c6-8lvvq, provider=aws, redpanda_id=cno33lg90kn7sr8bjhs0, service=external-dns Value:0xc00ac99628} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.3.6.21:7979, job=external-dns, namespace=external-dns, pod=external-dns-797b959c6-8lvvq, provider=aws, redpanda_id=cno33lg90kn7sr8bjhs0, service=external-dns Value:0xc00ac99530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610951191s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.3.6.21:7979, job=external-dns, namespace=external-dns, pod=external-dns-797b959c6-8lvvq, provider=aws, redpanda_id=cno33lg90kn7sr8bjhs0, service=external-dns} value=0 ], [ 
var='C' labels={container=external-dns, endpoint=http, instance=10.3.6.21:7979, job=external-dns, namespace=external-dns, pod=external-dns-797b959c6-8lvvq, provider=aws, redpanda_id=cno33lg90kn7sr8bjhs0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.3.8.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-58885f75fc-76pf5, provider=aws, redpanda_id=cj3tnuar6237gqb2kob0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.3.8.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-58885f75fc-76pf5, provider=aws, redpanda_id=cj3tnuar6237gqb2kob0, service=external-dns Value:0xc00ac99db8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.3.8.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-58885f75fc-76pf5, provider=aws, redpanda_id=cj3tnuar6237gqb2kob0, service=external-dns Value:0xc00ac99fe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61095821s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.3.8.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-58885f75fc-76pf5, provider=aws, redpanda_id=cj3tnuar6237gqb2kob0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.3.8.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-58885f75fc-76pf5, provider=aws, redpanda_id=cj3tnuar6237gqb2kob0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.30.11.7:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f9b49f97d-b68ph, provider=gcp, redpanda_id=chhnf2abksi1fj6tastg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.30.11.7:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f9b49f97d-b68ph, provider=gcp, redpanda_id=chhnf2abksi1fj6tastg, service=external-dns Value:0xc00f8ac5b0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.30.11.7:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f9b49f97d-b68ph, provider=gcp, redpanda_id=chhnf2abksi1fj6tastg, service=external-dns Value:0xc00f8ac890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610966922s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.30.11.7:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f9b49f97d-b68ph, provider=gcp, redpanda_id=chhnf2abksi1fj6tastg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.30.11.7:7979, job=external-dns, namespace=external-dns, pod=external-dns-6f9b49f97d-b68ph, provider=gcp, redpanda_id=chhnf2abksi1fj6tastg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.30.9.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-66ccb5fc8b-jlv4w, provider=gcp, redpanda_id=chd4a4buvjnko39cg6v0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.30.9.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-66ccb5fc8b-jlv4w, provider=gcp, redpanda_id=chd4a4buvjnko39cg6v0, service=external-dns Value:0xc00f8ad040} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.30.9.155:7979, job=external-dns, namespace=external-dns, 
pod=external-dns-66ccb5fc8b-jlv4w, provider=gcp, redpanda_id=chd4a4buvjnko39cg6v0, service=external-dns Value:0xc00f8ad298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610974669s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.30.9.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-66ccb5fc8b-jlv4w, provider=gcp, redpanda_id=chd4a4buvjnko39cg6v0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.30.9.155:7979, job=external-dns, namespace=external-dns, pod=external-dns-66ccb5fc8b-jlv4w, provider=gcp, redpanda_id=chd4a4buvjnko39cg6v0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.33.14.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-6586f46676-xlv48, provider=aws, redpanda_id=cjtgkc07jgn1poljg6sg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.33.14.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-6586f46676-xlv48, provider=aws, redpanda_id=cjtgkc07jgn1poljg6sg, service=external-dns Value:0xc00f8ad8e0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.33.14.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-6586f46676-xlv48, provider=aws, redpanda_id=cjtgkc07jgn1poljg6sg, service=external-dns Value:0xc00f8adbe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610982749s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.33.14.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-6586f46676-xlv48, provider=aws, redpanda_id=cjtgkc07jgn1poljg6sg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.33.14.94:7979, job=external-dns, namespace=external-dns, pod=external-dns-6586f46676-xlv48, provider=aws, redpanda_id=cjtgkc07jgn1poljg6sg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.4.0.63:7979, job=external-dns, namespace=external-dns, pod=external-dns-7467cb4c44-smkrj, provider=aws, redpanda_id=cno335890kn7sr8bjhp0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.4.0.63:7979, job=external-dns, namespace=external-dns, pod=external-dns-7467cb4c44-smkrj, provider=aws, redpanda_id=cno335890kn7sr8bjhp0, service=external-dns Value:0xc02a3dc140} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.4.0.63:7979, job=external-dns, namespace=external-dns, pod=external-dns-7467cb4c44-smkrj, provider=aws, redpanda_id=cno335890kn7sr8bjhp0, service=external-dns Value:0xc00f8adfd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.610990785s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.4.0.63:7979, job=external-dns, namespace=external-dns, pod=external-dns-7467cb4c44-smkrj, provider=aws, redpanda_id=cno335890kn7sr8bjhp0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.4.0.63:7979, job=external-dns, namespace=external-dns, pod=external-dns-7467cb4c44-smkrj, provider=aws, redpanda_id=cno335890kn7sr8bjhp0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.41.192.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b6b9d968f-jdnlz, provider=gcp, 
redpanda_id=cnuvknbiuvqvkcfi59ig, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.41.192.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b6b9d968f-jdnlz, provider=gcp, redpanda_id=cnuvknbiuvqvkcfi59ig, service=external-dns Value:0xc02a3dc290} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.41.192.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b6b9d968f-jdnlz, provider=gcp, redpanda_id=cnuvknbiuvqvkcfi59ig, service=external-dns Value:0xc02a3dc340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611000812s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.41.192.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b6b9d968f-jdnlz, provider=gcp, redpanda_id=cnuvknbiuvqvkcfi59ig, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.41.192.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-5b6b9d968f-jdnlz, provider=gcp, redpanda_id=cnuvknbiuvqvkcfi59ig, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.5.0.114:7979, job=external-dns, namespace=external-dns, pod=external-dns-67dccdbb4c-tfxql, provider=aws, redpanda_id=cfl3m3aei3b2k79h2brg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.5.0.114:7979, job=external-dns, namespace=external-dns, pod=external-dns-67dccdbb4c-tfxql, provider=aws, redpanda_id=cfl3m3aei3b2k79h2brg, service=external-dns Value:0xc02a3dc558} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.5.0.114:7979, job=external-dns, namespace=external-dns, pod=external-dns-67dccdbb4c-tfxql, provider=aws, redpanda_id=cfl3m3aei3b2k79h2brg, service=external-dns Value:0xc02a3dc4b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611009914s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.5.0.114:7979, job=external-dns, namespace=external-dns, pod=external-dns-67dccdbb4c-tfxql, provider=aws, redpanda_id=cfl3m3aei3b2k79h2brg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.5.0.114:7979, job=external-dns, namespace=external-dns, pod=external-dns-67dccdbb4c-tfxql, provider=aws, redpanda_id=cfl3m3aei3b2k79h2brg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.5.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cf6446bcd-6vsb8, provider=aws, redpanda_id=ch341oogvg4l92oem7u0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.5.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cf6446bcd-6vsb8, provider=aws, redpanda_id=ch341oogvg4l92oem7u0, service=external-dns Value:0xc02a3dc808} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.5.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cf6446bcd-6vsb8, provider=aws, redpanda_id=ch341oogvg4l92oem7u0, service=external-dns Value:0xc02a3dc950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611017568s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.5.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cf6446bcd-6vsb8, provider=aws, 
redpanda_id=ch341oogvg4l92oem7u0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.5.0.172:7979, job=external-dns, namespace=external-dns, pod=external-dns-6cf6446bcd-6vsb8, provider=aws, redpanda_id=ch341oogvg4l92oem7u0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.5.0.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-74df4f84f9-hhd9t, provider=aws, redpanda_id=cfn2gs2ei3b2k79h2es0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.5.0.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-74df4f84f9-hhd9t, provider=aws, redpanda_id=cfn2gs2ei3b2k79h2es0, service=external-dns Value:0xc02a3dcad0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.5.0.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-74df4f84f9-hhd9t, provider=aws, redpanda_id=cfn2gs2ei3b2k79h2es0, service=external-dns Value:0xc02a3dcba8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611026176s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.5.0.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-74df4f84f9-hhd9t, provider=aws, redpanda_id=cfn2gs2ei3b2k79h2es0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.5.0.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-74df4f84f9-hhd9t, provider=aws, redpanda_id=cfn2gs2ei3b2k79h2es0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.5.14.106:7979, job=external-dns, namespace=external-dns, pod=external-dns-675cc6b89-szqzb, provider=aws, redpanda_id=cj1ti4jube5f1u6qjfrg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.5.14.106:7979, job=external-dns, namespace=external-dns, pod=external-dns-675cc6b89-szqzb, provider=aws, redpanda_id=cj1ti4jube5f1u6qjfrg, service=external-dns Value:0xc02a3dcce8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.5.14.106:7979, job=external-dns, namespace=external-dns, pod=external-dns-675cc6b89-szqzb, provider=aws, redpanda_id=cj1ti4jube5f1u6qjfrg, service=external-dns Value:0xc02a3dcdd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611033752s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.5.14.106:7979, job=external-dns, namespace=external-dns, pod=external-dns-675cc6b89-szqzb, provider=aws, redpanda_id=cj1ti4jube5f1u6qjfrg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.5.14.106:7979, job=external-dns, namespace=external-dns, pod=external-dns-675cc6b89-szqzb, provider=aws, redpanda_id=cj1ti4jube5f1u6qjfrg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.53.128.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-d9cd7945f-c9t7f, provider=gcp, redpanda_id=cngh6crqu42smqf5qr1g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.53.128.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-d9cd7945f-c9t7f, provider=gcp, redpanda_id=cngh6crqu42smqf5qr1g, service=external-dns Value:0xc02a3dcfd0} C:{Var:C Labels:container=external-dns, endpoint=http, 
instance=10.53.128.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-d9cd7945f-c9t7f, provider=gcp, redpanda_id=cngh6crqu42smqf5qr1g, service=external-dns Value:0xc02a3dd0b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611041957s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.53.128.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-d9cd7945f-c9t7f, provider=gcp, redpanda_id=cngh6crqu42smqf5qr1g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.53.128.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-d9cd7945f-c9t7f, provider=gcp, redpanda_id=cngh6crqu42smqf5qr1g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.54.2.191:7979, job=external-dns, namespace=external-dns, pod=external-dns-9c9d77795-f9nhx, provider=aws, redpanda_id=ci4sa407e7rqqatd3hmg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.54.2.191:7979, job=external-dns, namespace=external-dns, pod=external-dns-9c9d77795-f9nhx, provider=aws, redpanda_id=ci4sa407e7rqqatd3hmg, service=external-dns Value:0xc02a3dd240} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.54.2.191:7979, job=external-dns, namespace=external-dns, pod=external-dns-9c9d77795-f9nhx, provider=aws, redpanda_id=ci4sa407e7rqqatd3hmg, service=external-dns Value:0xc02a3dd340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611069445s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.54.2.191:7979, job=external-dns, namespace=external-dns, pod=external-dns-9c9d77795-f9nhx, provider=aws, redpanda_id=ci4sa407e7rqqatd3hmg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.54.2.191:7979, job=external-dns, namespace=external-dns, pod=external-dns-9c9d77795-f9nhx, provider=aws, redpanda_id=ci4sa407e7rqqatd3hmg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.58.128.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fb5746cdc-wnk8r, provider=gcp, redpanda_id=cl0h411k7kve5j7088hg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.58.128.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fb5746cdc-wnk8r, provider=gcp, redpanda_id=cl0h411k7kve5j7088hg, service=external-dns Value:0xc02a3dd4e0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.58.128.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fb5746cdc-wnk8r, provider=gcp, redpanda_id=cl0h411k7kve5j7088hg, service=external-dns Value:0xc02a3dd6a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611077522s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.58.128.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fb5746cdc-wnk8r, provider=gcp, redpanda_id=cl0h411k7kve5j7088hg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.58.128.32:7979, job=external-dns, namespace=external-dns, pod=external-dns-6fb5746cdc-wnk8r, provider=gcp, redpanda_id=cl0h411k7kve5j7088hg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.58.67.26:7979, job=external-dns, 
namespace=external-dns, pod=external-dns-776c8dd48f-wsfcb, provider=gcp, redpanda_id=clschtuqc4k3h6rq5blg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.58.67.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-776c8dd48f-wsfcb, provider=gcp, redpanda_id=clschtuqc4k3h6rq5blg, service=external-dns Value:0xc02a3dd868} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.58.67.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-776c8dd48f-wsfcb, provider=gcp, redpanda_id=clschtuqc4k3h6rq5blg, service=external-dns Value:0xc02a3dd938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611093621s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.58.67.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-776c8dd48f-wsfcb, provider=gcp, redpanda_id=clschtuqc4k3h6rq5blg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.58.67.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-776c8dd48f-wsfcb, provider=gcp, redpanda_id=clschtuqc4k3h6rq5blg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.6.0.146:7979, job=external-dns, namespace=external-dns, pod=external-dns-597f465f54-g8jlg, provider=aws, redpanda_id=cgs1c8e984cnrsjmp320, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.6.0.146:7979, job=external-dns, namespace=external-dns, pod=external-dns-597f465f54-g8jlg, provider=aws, redpanda_id=cgs1c8e984cnrsjmp320, service=external-dns Value:0xc02a3ddc30} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.6.0.146:7979, job=external-dns, namespace=external-dns, pod=external-dns-597f465f54-g8jlg, provider=aws, redpanda_id=cgs1c8e984cnrsjmp320, service=external-dns Value:0xc02a3ddb58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611102783s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.6.0.146:7979, job=external-dns, namespace=external-dns, pod=external-dns-597f465f54-g8jlg, provider=aws, redpanda_id=cgs1c8e984cnrsjmp320, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.6.0.146:7979, job=external-dns, namespace=external-dns, pod=external-dns-597f465f54-g8jlg, provider=aws, redpanda_id=cgs1c8e984cnrsjmp320, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.6.16.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86c575d86f-2xh7z, provider=aws, redpanda_id=cou0ulje1d7i2jp1fdi0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.6.16.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86c575d86f-2xh7z, provider=aws, redpanda_id=cou0ulje1d7i2jp1fdi0, service=external-dns Value:0xc026ea6048} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.6.16.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86c575d86f-2xh7z, provider=aws, redpanda_id=cou0ulje1d7i2jp1fdi0, service=external-dns Value:0xc026ea6220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611109873s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.6.16.189:7979, job=external-dns, 
namespace=external-dns, pod=external-dns-86c575d86f-2xh7z, provider=aws, redpanda_id=cou0ulje1d7i2jp1fdi0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.6.16.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-86c575d86f-2xh7z, provider=aws, redpanda_id=cou0ulje1d7i2jp1fdi0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.6.54.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-d5f5ddbbc-w86sq, provider=aws, redpanda_id=cpb4qli77h8g0iu4f7tg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.6.54.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-d5f5ddbbc-w86sq, provider=aws, redpanda_id=cpb4qli77h8g0iu4f7tg, service=external-dns Value:0xc026ea6828} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.6.54.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-d5f5ddbbc-w86sq, provider=aws, redpanda_id=cpb4qli77h8g0iu4f7tg, service=external-dns Value:0xc026ea6648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611116145s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.6.54.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-d5f5ddbbc-w86sq, provider=aws, redpanda_id=cpb4qli77h8g0iu4f7tg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.6.54.6:7979, job=external-dns, namespace=external-dns, pod=external-dns-d5f5ddbbc-w86sq, provider=aws, redpanda_id=cpb4qli77h8g0iu4f7tg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.60.10.182:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cbc8c6d9-55qhh, provider=aws, redpanda_id=copbt0j7u2hdqvupt52g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.60.10.182:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cbc8c6d9-55qhh, provider=aws, redpanda_id=copbt0j7u2hdqvupt52g, service=external-dns Value:0xc026ea6e30} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.60.10.182:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cbc8c6d9-55qhh, provider=aws, redpanda_id=copbt0j7u2hdqvupt52g, service=external-dns Value:0xc026ea72a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611122968s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.60.10.182:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cbc8c6d9-55qhh, provider=aws, redpanda_id=copbt0j7u2hdqvupt52g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.60.10.182:7979, job=external-dns, namespace=external-dns, pod=external-dns-86cbc8c6d9-55qhh, provider=aws, redpanda_id=copbt0j7u2hdqvupt52g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.60.192.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d675867d9-nxz96, provider=gcp, redpanda_id=cnc28jka4ff1g49hecf0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.60.192.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d675867d9-nxz96, provider=gcp, redpanda_id=cnc28jka4ff1g49hecf0, service=external-dns 
Value:0xc026ea7638} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.60.192.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d675867d9-nxz96, provider=gcp, redpanda_id=cnc28jka4ff1g49hecf0, service=external-dns Value:0xc026ea77b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611130691s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.60.192.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d675867d9-nxz96, provider=gcp, redpanda_id=cnc28jka4ff1g49hecf0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.60.192.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-7d675867d9-nxz96, provider=gcp, redpanda_id=cnc28jka4ff1g49hecf0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.64.8.149:7979, job=external-dns, namespace=external-dns, pod=external-dns-b58995f69-dc6tn, provider=gcp, redpanda_id=cee75b1afq6s316q65mg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.64.8.149:7979, job=external-dns, namespace=external-dns, pod=external-dns-b58995f69-dc6tn, provider=gcp, redpanda_id=cee75b1afq6s316q65mg, service=external-dns Value:0xc026ea7aa8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.64.8.149:7979, job=external-dns, namespace=external-dns, pod=external-dns-b58995f69-dc6tn, provider=gcp, redpanda_id=cee75b1afq6s316q65mg, service=external-dns Value:0xc026ea7c40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611139621s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.64.8.149:7979, job=external-dns, namespace=external-dns, pod=external-dns-b58995f69-dc6tn, provider=gcp, redpanda_id=cee75b1afq6s316q65mg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.64.8.149:7979, job=external-dns, namespace=external-dns, pod=external-dns-b58995f69-dc6tn, provider=gcp, redpanda_id=cee75b1afq6s316q65mg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.64.8.18:7979, job=external-dns, namespace=external-dns, pod=external-dns-84f5b4f6bf-k4j75, provider=gcp, redpanda_id=cfc37jc3hig0l2didagg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.64.8.18:7979, job=external-dns, namespace=external-dns, pod=external-dns-84f5b4f6bf-k4j75, provider=gcp, redpanda_id=cfc37jc3hig0l2didagg, service=external-dns Value:0xc036e820b0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.64.8.18:7979, job=external-dns, namespace=external-dns, pod=external-dns-84f5b4f6bf-k4j75, provider=gcp, redpanda_id=cfc37jc3hig0l2didagg, service=external-dns Value:0xc036e821d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611147721s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.64.8.18:7979, job=external-dns, namespace=external-dns, pod=external-dns-84f5b4f6bf-k4j75, provider=gcp, redpanda_id=cfc37jc3hig0l2didagg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.64.8.18:7979, job=external-dns, namespace=external-dns, pod=external-dns-84f5b4f6bf-k4j75, provider=gcp, redpanda_id=cfc37jc3hig0l2didagg, service=external-dns} value=0 ]} {Instance:container=external-dns, 
endpoint=http, instance=10.64.9.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-78fd59fbd8-b2pb5, provider=gcp, redpanda_id=ce9p9mb9tos7v1tuco70, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.64.9.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-78fd59fbd8-b2pb5, provider=gcp, redpanda_id=ce9p9mb9tos7v1tuco70, service=external-dns Value:0xc036e82488} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.64.9.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-78fd59fbd8-b2pb5, provider=gcp, redpanda_id=ce9p9mb9tos7v1tuco70, service=external-dns Value:0xc036e82620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61115514s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.64.9.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-78fd59fbd8-b2pb5, provider=gcp, redpanda_id=ce9p9mb9tos7v1tuco70, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.64.9.135:7979, job=external-dns, namespace=external-dns, pod=external-dns-78fd59fbd8-b2pb5, provider=gcp, redpanda_id=ce9p9mb9tos7v1tuco70, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.70.9.4:7979, job=external-dns, namespace=external-dns, pod=external-dns-f4d68965d-28ft9, provider=gcp, redpanda_id=ci8p8nodcr0nohbi06k0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.70.9.4:7979, job=external-dns, namespace=external-dns, pod=external-dns-f4d68965d-28ft9, provider=gcp, redpanda_id=ci8p8nodcr0nohbi06k0, service=external-dns Value:0xc036e82b30} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.70.9.4:7979, job=external-dns, namespace=external-dns, pod=external-dns-f4d68965d-28ft9, provider=gcp, redpanda_id=ci8p8nodcr0nohbi06k0, service=external-dns Value:0xc036e82980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611163567s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.70.9.4:7979, job=external-dns, namespace=external-dns, pod=external-dns-f4d68965d-28ft9, provider=gcp, redpanda_id=ci8p8nodcr0nohbi06k0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.70.9.4:7979, job=external-dns, namespace=external-dns, pod=external-dns-f4d68965d-28ft9, provider=gcp, redpanda_id=ci8p8nodcr0nohbi06k0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.71.13.9:7979, job=external-dns, namespace=external-dns, pod=external-dns-5659c99f86-f62gt, provider=gcp, redpanda_id=ci8q04tu86fclp7lpl0g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.71.13.9:7979, job=external-dns, namespace=external-dns, pod=external-dns-5659c99f86-f62gt, provider=gcp, redpanda_id=ci8q04tu86fclp7lpl0g, service=external-dns Value:0xc036e82df0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.71.13.9:7979, job=external-dns, namespace=external-dns, pod=external-dns-5659c99f86-f62gt, provider=gcp, redpanda_id=ci8q04tu86fclp7lpl0g, service=external-dns Value:0xc036e83080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611171445s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, 
instance=10.71.13.9:7979, job=external-dns, namespace=external-dns, pod=external-dns-5659c99f86-f62gt, provider=gcp, redpanda_id=ci8q04tu86fclp7lpl0g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.71.13.9:7979, job=external-dns, namespace=external-dns, pod=external-dns-5659c99f86-f62gt, provider=gcp, redpanda_id=ci8q04tu86fclp7lpl0g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.71.56.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d5d5d5f7-r7kpk, provider=gcp, redpanda_id=cnth32q83bcutli1gd5g, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.71.56.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d5d5d5f7-r7kpk, provider=gcp, redpanda_id=cnth32q83bcutli1gd5g, service=external-dns Value:0xc036e834c8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.71.56.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d5d5d5f7-r7kpk, provider=gcp, redpanda_id=cnth32q83bcutli1gd5g, service=external-dns Value:0xc036e83368}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611179148s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.71.56.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d5d5d5f7-r7kpk, provider=gcp, redpanda_id=cnth32q83bcutli1gd5g, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.71.56.26:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d5d5d5f7-r7kpk, provider=gcp, redpanda_id=cnth32q83bcutli1gd5g, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.71.6.173:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bb68b948b-msrmc, provider=aws, redpanda_id=cioieosfbvm2cii25pg0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.71.6.173:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bb68b948b-msrmc, provider=aws, redpanda_id=cioieosfbvm2cii25pg0, service=external-dns Value:0xc036e837f8} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.71.6.173:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bb68b948b-msrmc, provider=aws, redpanda_id=cioieosfbvm2cii25pg0, service=external-dns Value:0xc036e83958}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611187148s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.71.6.173:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bb68b948b-msrmc, provider=aws, redpanda_id=cioieosfbvm2cii25pg0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.71.6.173:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bb68b948b-msrmc, provider=aws, redpanda_id=cioieosfbvm2cii25pg0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.8.0.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-797485d84-8z5kz, provider=aws, redpanda_id=colpfv0vha20igtb7sag, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.8.0.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-797485d84-8z5kz, provider=aws, 
redpanda_id=colpfv0vha20igtb7sag, service=external-dns Value:0xc036e83d10} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.8.0.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-797485d84-8z5kz, provider=aws, redpanda_id=colpfv0vha20igtb7sag, service=external-dns Value:0xc036e83e80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611196195s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.8.0.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-797485d84-8z5kz, provider=aws, redpanda_id=colpfv0vha20igtb7sag, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.8.0.189:7979, job=external-dns, namespace=external-dns, pod=external-dns-797485d84-8z5kz, provider=aws, redpanda_id=colpfv0vha20igtb7sag, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.8.2.117:7979, job=external-dns, namespace=external-dns, pod=external-dns-74dc857f6c-mrmss, provider=aws, redpanda_id=cotudq2l9050bvlc7kcg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.8.2.117:7979, job=external-dns, namespace=external-dns, pod=external-dns-74dc857f6c-mrmss, provider=aws, redpanda_id=cotudq2l9050bvlc7kcg, service=external-dns Value:0xc034ae4228} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.8.2.117:7979, job=external-dns, namespace=external-dns, pod=external-dns-74dc857f6c-mrmss, provider=aws, redpanda_id=cotudq2l9050bvlc7kcg, service=external-dns Value:0xc034ae4330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611216121s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.8.2.117:7979, job=external-dns, namespace=external-dns, pod=external-dns-74dc857f6c-mrmss, provider=aws, redpanda_id=cotudq2l9050bvlc7kcg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.8.2.117:7979, job=external-dns, namespace=external-dns, pod=external-dns-74dc857f6c-mrmss, provider=aws, redpanda_id=cotudq2l9050bvlc7kcg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=10.8.2.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-679c664d9-v8zwz, provider=aws, redpanda_id=clbso42c99huahai5ipg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.8.2.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-679c664d9-v8zwz, provider=aws, redpanda_id=clbso42c99huahai5ipg, service=external-dns Value:0xc034ae46f0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.8.2.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-679c664d9-v8zwz, provider=aws, redpanda_id=clbso42c99huahai5ipg, service=external-dns Value:0xc034ae4810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611223391s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.8.2.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-679c664d9-v8zwz, provider=aws, redpanda_id=clbso42c99huahai5ipg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.8.2.124:7979, job=external-dns, namespace=external-dns, pod=external-dns-679c664d9-v8zwz, provider=aws, redpanda_id=clbso42c99huahai5ipg, service=external-dns} value=0 
]} {Instance:container=external-dns, endpoint=http, instance=10.8.6.225:7979, job=external-dns, namespace=external-dns, pod=external-dns-5bfccf748f-nh8rl, provider=aws, redpanda_id=cp17312j3085r2lqj4sg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=10.8.6.225:7979, job=external-dns, namespace=external-dns, pod=external-dns-5bfccf748f-nh8rl, provider=aws, redpanda_id=cp17312j3085r2lqj4sg, service=external-dns Value:0xc034ae4a40} C:{Var:C Labels:container=external-dns, endpoint=http, instance=10.8.6.225:7979, job=external-dns, namespace=external-dns, pod=external-dns-5bfccf748f-nh8rl, provider=aws, redpanda_id=cp17312j3085r2lqj4sg, service=external-dns Value:0xc034ae4b70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611232656s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=10.8.6.225:7979, job=external-dns, namespace=external-dns, pod=external-dns-5bfccf748f-nh8rl, provider=aws, redpanda_id=cp17312j3085r2lqj4sg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=10.8.6.225:7979, job=external-dns, namespace=external-dns, pod=external-dns-5bfccf748f-nh8rl, provider=aws, redpanda_id=cp17312j3085r2lqj4sg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=100.88.130.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-b9fdd65f9-qr7tg, provider=gcp, redpanda_id=cnip5e2ahikqss22j4f0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=100.88.130.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-b9fdd65f9-qr7tg, provider=gcp, redpanda_id=cnip5e2ahikqss22j4f0, service=external-dns Value:0xc034ae4e60} C:{Var:C Labels:container=external-dns, endpoint=http, instance=100.88.130.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-b9fdd65f9-qr7tg, provider=gcp, redpanda_id=cnip5e2ahikqss22j4f0, service=external-dns Value:0xc034ae5010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611242016s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=100.88.130.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-b9fdd65f9-qr7tg, provider=gcp, redpanda_id=cnip5e2ahikqss22j4f0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=100.88.130.5:7979, job=external-dns, namespace=external-dns, pod=external-dns-b9fdd65f9-qr7tg, provider=gcp, redpanda_id=cnip5e2ahikqss22j4f0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=100.88.136.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d7d9459f-4kxld, provider=gcp, redpanda_id=cnipfdrqu42smqf5rt40, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=100.88.136.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d7d9459f-4kxld, provider=gcp, redpanda_id=cnipfdrqu42smqf5rt40, service=external-dns Value:0xc034ae5288} C:{Var:C Labels:container=external-dns, endpoint=http, instance=100.88.136.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d7d9459f-4kxld, provider=gcp, redpanda_id=cnipfdrqu42smqf5rt40, service=external-dns Value:0xc034ae53f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61124995s EvaluationString:[ var='B' 
labels={container=external-dns, endpoint=http, instance=100.88.136.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d7d9459f-4kxld, provider=gcp, redpanda_id=cnipfdrqu42smqf5rt40, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=100.88.136.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-65d7d9459f-4kxld, provider=gcp, redpanda_id=cnipfdrqu42smqf5rt40, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=100.88.145.153:7979, job=external-dns, namespace=external-dns, pod=external-dns-8d99dd969-js5zx, provider=gcp, redpanda_id=cnipvd2ahikqss22j670, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=100.88.145.153:7979, job=external-dns, namespace=external-dns, pod=external-dns-8d99dd969-js5zx, provider=gcp, redpanda_id=cnipvd2ahikqss22j670, service=external-dns Value:0xc034ae57e0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=100.88.145.153:7979, job=external-dns, namespace=external-dns, pod=external-dns-8d99dd969-js5zx, provider=gcp, redpanda_id=cnipvd2ahikqss22j670, service=external-dns Value:0xc034ae5658}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611264007s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=100.88.145.153:7979, job=external-dns, namespace=external-dns, pod=external-dns-8d99dd969-js5zx, provider=gcp, redpanda_id=cnipvd2ahikqss22j670, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=100.88.145.153:7979, job=external-dns, namespace=external-dns, pod=external-dns-8d99dd969-js5zx, provider=gcp, redpanda_id=cnipvd2ahikqss22j670, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=100.88.152.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc645655-lg7zp, provider=gcp, redpanda_id=cost10i34627vtg2kmg0, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=100.88.152.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc645655-lg7zp, provider=gcp, redpanda_id=cost10i34627vtg2kmg0, service=external-dns Value:0xc034ae5b30} C:{Var:C Labels:container=external-dns, endpoint=http, instance=100.88.152.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc645655-lg7zp, provider=gcp, redpanda_id=cost10i34627vtg2kmg0, service=external-dns Value:0xc034ae59c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611271808s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=100.88.152.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc645655-lg7zp, provider=gcp, redpanda_id=cost10i34627vtg2kmg0, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=100.88.152.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-79bc645655-lg7zp, provider=gcp, redpanda_id=cost10i34627vtg2kmg0, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=100.88.16.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b765d966f-fcbb6, provider=gcp, redpanda_id=cnaa0i9028lf1qmgl5tg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=100.88.16.24:7979, job=external-dns, 
namespace=external-dns, pod=external-dns-6b765d966f-fcbb6, provider=gcp, redpanda_id=cnaa0i9028lf1qmgl5tg, service=external-dns Value:0xc034ae5d20} C:{Var:C Labels:container=external-dns, endpoint=http, instance=100.88.16.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b765d966f-fcbb6, provider=gcp, redpanda_id=cnaa0i9028lf1qmgl5tg, service=external-dns Value:0xc034ae5f20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61127946s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=100.88.16.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b765d966f-fcbb6, provider=gcp, redpanda_id=cnaa0i9028lf1qmgl5tg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=100.88.16.24:7979, job=external-dns, namespace=external-dns, pod=external-dns-6b765d966f-fcbb6, provider=gcp, redpanda_id=cnaa0i9028lf1qmgl5tg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=100.88.24.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-7465848695-q8g9g, provider=gcp, redpanda_id=cos6rsg9okkarnijlk10, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=100.88.24.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-7465848695-q8g9g, provider=gcp, redpanda_id=cos6rsg9okkarnijlk10, service=external-dns Value:0xc0118c6150} C:{Var:C Labels:container=external-dns, endpoint=http, instance=100.88.24.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-7465848695-q8g9g, provider=gcp, redpanda_id=cos6rsg9okkarnijlk10, service=external-dns Value:0xc0118c60a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611287093s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=100.88.24.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-7465848695-q8g9g, provider=gcp, redpanda_id=cos6rsg9okkarnijlk10, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=100.88.24.27:7979, job=external-dns, namespace=external-dns, pod=external-dns-7465848695-q8g9g, provider=gcp, redpanda_id=cos6rsg9okkarnijlk10, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=100.88.9.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6ff8547c7c-sv7zj, provider=gcp, redpanda_id=cn1jn9k5ihvfjtmun7eg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=100.88.9.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6ff8547c7c-sv7zj, provider=gcp, redpanda_id=cn1jn9k5ihvfjtmun7eg, service=external-dns Value:0xc0118c63a0} C:{Var:C Labels:container=external-dns, endpoint=http, instance=100.88.9.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6ff8547c7c-sv7zj, provider=gcp, redpanda_id=cn1jn9k5ihvfjtmun7eg, service=external-dns Value:0xc0118c62e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.611295834s EvaluationString:[ var='B' labels={container=external-dns, endpoint=http, instance=100.88.9.25:7979, job=external-dns, namespace=external-dns, pod=external-dns-6ff8547c7c-sv7zj, provider=gcp, redpanda_id=cn1jn9k5ihvfjtmun7eg, service=external-dns} value=0 ], [ var='C' labels={container=external-dns, endpoint=http, instance=100.88.9.25:7979, job=external-dns, namespace=external-dns, 
pod=external-dns-6ff8547c7c-sv7zj, provider=gcp, redpanda_id=cn1jn9k5ihvfjtmun7eg, service=external-dns} value=0 ]} {Instance:container=external-dns, endpoint=http, instance=11.0.16.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bcc968c49-shpbg, provider=gcp, redpanda_id=cp75stcgu42nj1hpqdeg, service=external-dns State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:container=external-dns, endpoint=http, instance=11.0.16.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bcc968c49-shpbg, provider=gcp, redpanda_id=cp75stcgu42nj1hpqdeg, service=external-dns Value:0xc0118c6510} C:{Var:C Labels:container=external-dns, endpoint=http, instance=11.0.16.36:7979, job=external-dns, namespace=external-dns, pod=external-dns-6bcc968c49-shpbg, provider=gcp, redpanda_id=cp75stcgu42nj1hpqdeg, service=external-dns Value:0xc0118c65b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDur + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-003" t=2024-05-29T13:44:13.688394464Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=yes-yyz-ca-001" t=2024-05-29T13:44:13.688113792Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.68792194Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.687915251Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=xv-emp-ewr-us-001" t=2024-05-29T13:44:13.687894163Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=xv-dp-sin-sg-002" t=2024-05-29T13:44:13.687489963Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.687328254Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=556147 slug=bettercloudholding t=2024-05-29T13:44:13.687368209Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="datasource_uid=holding-prom-k8s, ref_id=A" t=2024-05-29T13:44:13.687350869Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="datasource_uid=holding-prom-k8s, ref_id=A" t=2024-05-29T13:44:13.687343169Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=556147 slug=bettercloudholding instance="datasource_uid=holding-prom-k8s, ref_id=A" t=2024-05-29T13:44:13.687330153Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=xv-dp-sin-sg-001" t=2024-05-29T13:44:13.687304765Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.687197657Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.687167938Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=xv-dp-hkg-hk-002" t=2024-05-29T13:44:13.687190156Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:13.687107726Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.68701517Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=vwt-ge-01" t=2024-05-29T13:44:13.686807732Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.686588128Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=vwt-cy-01" t=2024-05-29T13:44:13.686546752Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.686397345Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.685604272Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-st-26" t=2024-05-29T13:44:13.685611553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-st-25" t=2024-05-29T13:44:13.685481827Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.685351378Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.176.29:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=addec86c-9278-4cfc-b85c-f72638b214d6 alerts=1 + logger=ngalert.state.manager.persist user=550657 slug=garrigues t=2024-05-29T13:44:13.685237454Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.685242169Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.68520364Z caller=remote_image_capturer.go:33 user=550657 slug=garrigues rule_org_id=1 rule_uid=faaa7681-6c61-40d0-87ce-5e02e0c4ab58 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:13.685106172Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=550657 slug=garrigues instance= t=2024-05-29T13:44:13.685188898Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.685130037Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.685080437Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=550657 slug=garrigues t=2024-05-29T13:44:13.684964155Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=235691 slug=om2 t=2024-05-29T13:44:13.684967051Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.193845ms + level=debug ts=2024-05-29T13:44:13.684920552Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.684849066Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling 
SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.684700472Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=691855 slug=chainlake t=2024-05-29T13:44:13.684673936Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=691855 slug=chainlake version=4 fingerprint=83272502b8edd7d1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.684542409Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=compute-hel-7-cpx51-compute-hel-7, nodename=compute-hel-7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=compute-hel-7-cpx51-compute-hel-7, nodename=compute-hel-7 Value:0xc00627cf20} B:{Var:B Labels:instance=compute-hel-7-cpx51-compute-hel-7, nodename=compute-hel-7 Value:0xc00627cf38} C:{Var:C Labels:instance=compute-hel-7-cpx51-compute-hel-7, nodename=compute-hel-7 Value:0xc00627cf70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.6842086s EvaluationString:[ var='A' labels={instance=compute-hel-7-cpx51-compute-hel-7, nodename=compute-hel-7} value=0.14474791491344707 ], [ var='B' labels={instance=compute-hel-7-cpx51-compute-hel-7, nodename=compute-hel-7} value=0.14474791491344707 ], [ var='C' labels={instance=compute-hel-7-cpx51-compute-hel-7, nodename=compute-hel-7} value=0 ]}]" duration=247.194633ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-st-17" t=2024-05-29T13:44:13.684488367Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-st-16" t=2024-05-29T13:44:13.684339478Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-st-15" t=2024-05-29T13:44:13.684215286Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-009" t=2024-05-29T13:44:13.683588448Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-008" t=2024-05-29T13:44:13.683456305Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.683312659Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-007" t=2024-05-29T13:44:13.683287284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-006" t=2024-05-29T13:44:13.683168631Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=715708 slug=ggiprod t=2024-05-29T13:44:13.682880942Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.0786ms + logger=ngalert.state.manager.persist user=795224 slug=gannettdigital t=2024-05-29T13:44:13.682881601Z level=debug msg="Saving alert states done" count=19 max_state_save_concurrency=1 duration=237.648563ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-004" t=2024-05-29T13:44:13.682913229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=332534 slug=adevintakijiji t=2024-05-29T13:44:13.682759289Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=18.845998ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-003" t=2024-05-29T13:44:13.682754809Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-002" t=2024-05-29T13:44:13.682628296Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.682559672Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.682465921Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=499423 slug=rebelssoftware t=2024-05-29T13:44:13.682540093Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=499423 slug=rebelssoftware instance= t=2024-05-29T13:44:13.68252247Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=velia-fr-als-001" t=2024-05-29T13:44:13.682485527Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=499423 slug=rebelssoftware t=2024-05-29T13:44:13.682455104Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.682435725Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=528849 slug=bitvavo instance= t=2024-05-29T13:44:13.682373994Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=691103 slug=caetest t=2024-05-29T13:44:13.682061168Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.682082865Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-017" t=2024-05-29T13:44:13.682115041Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=691103 slug=caetest version=1 fingerprint=1bb141b66d312421 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.681965706Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.68179059s EvaluationString:}]" duration=6.593352ms + level=debug ts=2024-05-29T13:44:13.682046028Z caller=remote_instance_store.go:51 user=174016 slug=journalstaging msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.681880693Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.681899936Z caller=remote_instance_store.go:51 user=332534 slug=adevintakijiji msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.681788788Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-015" t=2024-05-29T13:44:13.681797316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-013" t=2024-05-29T13:44:13.681511313Z 
level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=332534 slug=adevintakijiji t=2024-05-29T13:44:13.6817554Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.681579047Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=309009 slug=elestyle t=2024-05-29T13:44:13.681315762Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="datasource_uid=b28d76fc-91a2-470d-8da0-ef4b1aa8bdc5, ref_id=A" t=2024-05-29T13:44:13.681273699Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.681286202Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=320778 slug=omegaai instance= t=2024-05-29T13:44:13.681288507Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.681271709Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-011" t=2024-05-29T13:44:13.681257698Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.68117101Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-011" t=2024-05-29T13:44:13.681196083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-010" t=2024-05-29T13:44:13.681088638Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.680952009Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.680893086Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.680884968Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.680675636Z caller=remote_instance_store.go:51 user=355429 slug=zenpli msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-006" t=2024-05-29T13:44:13.680591228Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.68064788Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-005" t=2024-05-29T13:44:13.680456403Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.680156346Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.680099177Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.68018605Z caller=remote_instance_store.go:51 user=749971 slug=unobravo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=749971 slug=unobravo t=2024-05-29T13:44:13.68015163Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + 
level=debug ts=2024-05-29T13:44:13.680071827Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-002" t=2024-05-29T13:44:13.680059163Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=749971 slug=unobravo t=2024-05-29T13:44:13.680013068Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-001" t=2024-05-29T13:44:13.67994582Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-lgw-uk-001" t=2024-05-29T13:44:13.679935853Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-cvt-uk-008" t=2024-05-29T13:44:13.679823351Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-cvt-uk-007" t=2024-05-29T13:44:13.679709957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-cvt-uk-006" t=2024-05-29T13:44:13.679589448Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-cvt-uk-004" t=2024-05-29T13:44:13.679331207Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.679183941Z caller=remote_instance_store.go:51 user=289377 slug=jochim msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.679125395Z caller=remote_instance_store.go:51 user=349229 slug=kropyva msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-cvt-uk-003" t=2024-05-29T13:44:13.67918416Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-cvt-uk-002" t=2024-05-29T13:44:13.679076205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=ukserv-cvt-uk-002" t=2024-05-29T13:44:13.679063222Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.67905019Z caller=remote_instance_store.go:51 user=212369 slug=finaloop msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=212369 slug=finaloop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.678994728Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:59:10Z next_ends_at=2024-05-29T14:04:10Z + logger=ngalert.state.manager user=212369 slug=finaloop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.678985094Z level=debug msg="Setting next state" handler=resultNoData + level=info ts=2024-05-29T13:44:13.678768268Z caller=remote_alert_sender.go:94 user=265585 slug=engageli host=engageli-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.80.85:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=WJrK2No4z alerts=1 + logger=ngalert.state.manager.persist user=707420 slug=pangealab t=2024-05-29T13:44:13.67867171Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=707420 slug=pangealab instance="datasource_uid=grafanacloud-prom, ref_id=A" 
t=2024-05-29T13:44:13.67865656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=707420 slug=pangealab t=2024-05-29T13:44:13.678616609Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=289377 slug=jochim instance="__name__=node_load5, agent_hostname=SSISBEASCA0007, host=ac-bitbucket, instance=SSISBEASCA0007:12345, job=integrations/node_exporter" t=2024-05-29T13:44:13.67862446Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.678489951Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.678566609Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-050" t=2024-05-29T13:44:13.678570222Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.678477736Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=289377 slug=jochim instance="__name__=node_load5, agent_hostname=SSISBEASCA0006, host=ac-bamboo, instance=SSISBEASCA0006:12345, job=integrations/node_exporter" t=2024-05-29T13:44:13.678498165Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=289377 slug=jochim instance="__name__=node_load5, agent_hostname=SSCSBEAS4010, host=ac-crowd, instance=SSCSBEAS4010:12345, job=integrations/node_exporter" t=2024-05-29T13:44:13.67837133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-049" t=2024-05-29T13:44:13.678458914Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.678417862Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=109452 slug=deltarisk t=2024-05-29T13:44:13.678326965Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=109452 slug=deltarisk version=21 fingerprint=3dec7d5d430bd479 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.678248691Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.677886929s EvaluationString:}]" duration=354.72082ms + level=debug ts=2024-05-29T13:44:13.678222382Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-047" t=2024-05-29T13:44:13.678207036Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-047" t=2024-05-29T13:44:13.678191858Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.678193402Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.678051805Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.677898595Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-045" 
t=2024-05-29T13:44:13.677961761Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.677835785Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Queenstown" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-044" t=2024-05-29T13:44:13.677838146Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.677748213Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-043" t=2024-05-29T13:44:13.677672435Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-042" t=2024-05-29T13:44:13.677553506Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=localhost:9400, job=WindowsExporterWeb, name=mdebugsvrd, serverName=Web, siteEnvironment=Prod, siteName=Queenstown, state=running" t=2024-05-29T13:44:13.67718626Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.677183263Z caller=remote_instance_store.go:51 user=54972 slug=zanglang msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.677117485Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=54972 slug=zanglang instance="account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:13.677094882Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.677088434Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.677080877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.677073927Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=191103 slug=amazonadmin version=52 fingerprint=8a1be47cc930516d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.67700543Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.676763294s EvaluationString:}]" duration=174.026332ms + level=debug ts=2024-05-29T13:44:13.676853121Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-037" t=2024-05-29T13:44:13.676869291Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-036" t=2024-05-29T13:44:13.676723705Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=54972 slug=zanglang instance="account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, 
job=hermes, otel_scope_name=hermes, service_name=unknown_service" t=2024-05-29T13:44:13.676501169Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.67652193Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=localhost:9400, job=WindowsExporter, name=mlockmand, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Luton, state=running" t=2024-05-29T13:44:13.67644515Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=54972 slug=zanglang version=11 fingerprint=317fa6ae87b3e651 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.676146335Z level=debug msg="Alert rule evaluated" results="[{Instance:account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc0193509f0} B:{Var:B Labels:account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc0193508f0} C:{Var:C Labels:account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.675538962s EvaluationString:[ var='A' labels={account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=1.6495150576308415 ], [ var='B' labels={account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=1.6495150576308415 ], [ var='C' labels={account=archway1nna7k5lywn99cd63elcfqm6p8c5c4qcua7parr, chain=archway-1, denom=aarch, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=0 ]} {Instance:account=crc16maj39vuvuuen3fttfejy09e8m6edkq40g87pt, chain=cronosmainnet_25-1, denom=basecro, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account=crc16maj39vuvuuen3fttfejy09e8m6edkq40g87pt, chain=cronosmainnet_25-1, denom=basecro, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350bb8} B:{Var:B Labels:account=crc16maj39vuvuuen3fttfejy09e8m6edkq40g87pt, chain=cronosmainnet_25-1, denom=basecro, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350ad0} C:{Var:C Labels:account=crc16maj39vuvuuen3fttfejy09e8m6edkq40g87pt, chain=cronosmainnet_25-1, denom=basecro, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350b40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.67556957s EvaluationString:[ var='A' labels={account=crc16maj39vuvuuen3fttfejy09e8m6edkq40g87pt, 
chain=cronosmainnet_25-1, denom=basecro, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=228.69600501 ], [ var='B' labels={account=crc16maj39vuvuuen3fttfejy09e8m6edkq40g87pt, chain=cronosmainnet_25-1, denom=basecro, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=228.69600501 ], [ var='C' labels={account=crc16maj39vuvuuen3fttfejy09e8m6edkq40g87pt, chain=cronosmainnet_25-1, denom=basecro, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=0 ]} {Instance:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350d48} B:{Var:B Labels:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350dd0} C:{Var:C Labels:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350cc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.675579028s EvaluationString:[ var='A' labels={account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=48.89671486539745 ], [ var='B' labels={account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=48.89671486539745 ], [ var='C' labels={account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-0:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=0 ]} {Instance:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350ed8} B:{Var:B Labels:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350f60} C:{Var:C Labels:account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service Value:0xc019350fe8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.675589337s EvaluationString:[ var='A' labels={account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=48.89671486539745 ], [ var='B' labels={account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, 
chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=48.89671486539745 ], [ var='C' labels={account=evmos1lldjhjnn32e8vek7cxe9g05nf8j74y0xa6dt3p, chain=evmos_9001-2, denom=aevmos, instance=hetzner-node-1:3001, job=hermes, otel_scope_name=hermes, service_name=unknown_service} value=0 ]}]" duration=10.159033ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-033" t=2024-05-29T13:44:13.676381621Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.676306476Z caller=remote_instance_store.go:51 user=703825 slug=andrewbauman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=855233 slug=sadeno instance="__name__=up, instance=localhost:9100, job=node_export" t=2024-05-29T13:44:13.676309815Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=localhost:9400, job=WindowsExporter, name=mgentracksvrd, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Luton, state=running" t=2024-05-29T13:44:13.676090642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=localhost:9400, job=WindowsExporter, name=mgentracksvrd, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Luton, state=running" t=2024-05-29T13:44:13.676079591Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:13.676169617Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-031" t=2024-05-29T13:44:13.676126455Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=150145 slug=pleasant t=2024-05-29T13:44:13.676100679Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=715708 slug=ggiprod t=2024-05-29T13:44:13.67575223Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.675890564Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Luton" + level=debug ts=2024-05-29T13:44:13.675717846Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=localhost:9400, job=WindowsExporter, name=mcman, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Luton, state=running" t=2024-05-29T13:44:13.675739492Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.675600708Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-027" t=2024-05-29T13:44:13.6756556Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=WebServer, job=WindowsExporterWeb, name=mworkermonitor, serverName=WebServer, siteEnvironment=Prod, siteName=Wellington, state=running" t=2024-05-29T13:44:13.675599235Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.675533524Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.675398834Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.136714ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-026" t=2024-05-29T13:44:13.675511612Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.675401506Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=824501 slug=bendingspoons t=2024-05-29T13:44:13.675404232Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="action=index" t=2024-05-29T13:44:13.675393344Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.67544453Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="action=index" t=2024-05-29T13:44:13.67538019Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=WebServer, job=WindowsExporterWeb, name=mlockmand, serverName=WebServer, siteEnvironment=Prod, siteName=Wellington, state=running" t=2024-05-29T13:44:13.675436279Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.675388304Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.675427893Z caller=remote_instance_store.go:51 user=18798 slug=smsportal msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=insurance-platform-worker, pod=insurance-platform-worker-6f8dd4f5f4-2c4ft" t=2024-05-29T13:44:13.67531881Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.675363268Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=49.933051ms + level=debug 
ts=2024-05-29T13:44:13.675339181Z caller=remote_instance_store.go:51 user=155740 slug=routific msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=615392 slug=shinemetrics instance="__name__=probe_success, config_version=1715008305388584192, instance=https://api.shine.fr/v2/export/liveness_check, job=Liveness Check export-v2, probe=Paris" t=2024-05-29T13:44:13.67527338Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=824501 slug=bendingspoons instance="action=delete" t=2024-05-29T13:44:13.675279946Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=WebServer, job=WindowsExporterWeb, name=mheartbeater, serverName=WebServer, siteEnvironment=Prod, siteName=Wellington, state=running" t=2024-05-29T13:44:13.675277398Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=71697 slug=lovelysystems t=2024-05-29T13:44:13.675191566Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.44414ms + logger=ngalert.state.manager.persist user=75789 slug=mysign t=2024-05-29T13:44:13.675224011Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.797566ms + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.675112922Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Wellington" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-023" t=2024-05-29T13:44:13.675158352Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=WebServer, job=WindowsExporterWeb, name=mdebugsvrd, serverName=WebServer, siteEnvironment=Prod, siteName=Wellington, state=running" t=2024-05-29T13:44:13.674991774Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-022" t=2024-05-29T13:44:13.675035503Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=615392 slug=shinemetrics t=2024-05-29T13:44:13.674934291Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.674944192Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Wellington" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-021" t=2024-05-29T13:44:13.67491972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5m8jjju-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674724679Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-019" t=2024-05-29T13:44:13.674690084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-019" t=2024-05-29T13:44:13.674680055Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5m1nd7u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674638128Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5m1nd7u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674570817Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.674570063Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Birmingham" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.674433319Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.589429ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5m1nd7u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674538887Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5m1nd7u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674508086Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=SAODB:9103, job=WindowsExporter, name=mlockmand, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Birmingham, state=running" t=2024-05-29T13:44:13.67443871Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=SAODB:9103, job=WindowsExporter, name=mlockmand, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Birmingham, state=running" t=2024-05-29T13:44:13.674428229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.674398724Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Birmingham" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5hdrxdu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674382705Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-016" t=2024-05-29T13:44:13.674312209Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=SAODB:9103, job=WindowsExporter, name=mgentracksvrd, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Birmingham, state=running" t=2024-05-29T13:44:13.674088115Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5e2eq3o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674215663Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5e2eq3o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674151913Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=SAODB:9103, job=WindowsExporter, name=mgentracksvrd, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Birmingham, state=running" t=2024-05-29T13:44:13.674074726Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l5e2eq3o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674126833Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l51z7eb9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674088032Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l51z7eb9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.674058432Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.674020767Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l51z7eb9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673995111Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=245291 slug=pismo version=749 fingerprint=b0b37bf451890699 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.673860857Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.673659207s EvaluationString:}]" duration=257.549364ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-012" t=2024-05-29T13:44:13.673846217Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-012" t=2024-05-29T13:44:13.673833634Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=265756 slug=vowfood t=2024-05-29T13:44:13.673789901Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.673785312Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Birmingham" + logger=ngalert.state.manager user=265756 slug=vowfood instance= t=2024-05-29T13:44:13.67377697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4v69c2g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673754769Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4v69c2g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673740069Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-011" t=2024-05-29T13:44:13.673714512Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.67367702Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.673500653Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-011" t=2024-05-29T13:44:13.673702001Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.673482175Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.673696309Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.673474648Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:13.673460254Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.673551565Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.673606643Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.673562191Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4sdfqra-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673640508Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=114492 
slug=railsbank version=1 fingerprint=e9f3c2f44c2fe0e6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.673395633Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.673124432s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=121.959674ms + logger=ngalert.state.manager.persist user=436633 slug=swirldslabsproduction t=2024-05-29T13:44:13.67257517Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=436633 slug=swirldslabsproduction instance="datasource_uid=grafanacloud-logs, ref_id=query" t=2024-05-29T13:44:13.672534819Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-010" t=2024-05-29T13:44:13.673545423Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=384712 slug=nearinc instance="datasource_uid=7FBWubNVz, ref_id=C,D" t=2024-05-29T13:44:13.673457392Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4raep32-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673488886Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=384712 slug=nearinc t=2024-05-29T13:44:13.673423162Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4raep32-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673397985Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-009" t=2024-05-29T13:44:13.673419361Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=384712 slug=nearinc version=47 fingerprint=2425c7e54cf75ef7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.673280162Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=7FBWubNVz, ref_id=C,D State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.673038733s EvaluationString:}]" duration=1.286522379s + level=debug ts=2024-05-29T13:44:13.673340986Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.6733191Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.673223062Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo 
instance="__name__=windows_service_state, instance=ApplicationServer, job=WindowsExporterApp, name=mworkermonitor, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Wellington, state=running" t=2024-05-29T13:44:13.673243964Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-008" t=2024-05-29T13:44:13.673278375Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.673230622Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-007" t=2024-05-29T13:44:13.673164057Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4pyl2oj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673125032Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4j8jtco-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.673001581Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l4es9zwx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.67285739Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.672823572Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-004" t=2024-05-29T13:44:13.672789143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=ApplicationServer, job=WindowsExporterApp, name=mheartbeater, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Wellington, state=running" t=2024-05-29T13:44:13.672759723Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.672719563Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.672724518Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Wellington" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-sin-sg-003" t=2024-05-29T13:44:13.672608139Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l48ircqw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672605607Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l48ircqw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672570517Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l48ircqw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672545376Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l48ircqw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672480016Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=63699 slug=bizzydist instance= t=2024-05-29T13:44:13.672507586Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=63699 slug=bizzydist instance= t=2024-05-29T13:44:13.672499283Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=63699 slug=bizzydist version=1 fingerprint=360ee5a09c5d1f24 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.672405333Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.672165557s EvaluationString:}]" duration=219.891221ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l47co4kv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672419135Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l47co4kv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672361704Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=ApplicationServer, job=WindowsExporterApp, name=mdebugsvrd, serverName=ApplicationServer, siteEnvironment=Prod, siteName=Wellington, state=running" t=2024-05-29T13:44:13.672348121Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.672254484Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Wellington" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-040" t=2024-05-29T13:44:13.672325411Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l42hxrjc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672169102Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-039" t=2024-05-29T13:44:13.672211897Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=830785 slug=jelitto t=2024-05-29T13:44:13.67209639Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l42hxrjc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672102212Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3y3ys08-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.672057761Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.67195923Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=830785 slug=jelitto t=2024-05-29T13:44:13.672001917Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-037" 
t=2024-05-29T13:44:13.671974864Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB02:9400, job=WindowsExporterWeb, name=mworkermonitor, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.671917894Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.67186695Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Sydney" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3y3ys08-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.671796469Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-036" t=2024-05-29T13:44:13.671846497Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3y3ys08-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.671767558Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3v41ucv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.671720248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB02:9400, job=WindowsExporterWeb, name=mlockmand, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.671708329Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB02:9400, job=WindowsExporterWeb, name=mlockmand, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.671698604Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.67154262Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-034" t=2024-05-29T13:44:13.671538688Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB02:9400, job=WindowsExporterWeb, name=mheartbeater, serverName=serverName, 
siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.671481438Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-033" t=2024-05-29T13:44:13.671439085Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3ogs8bk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.671319444Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3n6odxs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.671133602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=402122 slug=leapwallet instance= t=2024-05-29T13:44:13.671138388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3m0svfa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.671038281Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=402122 slug=leapwallet instance= t=2024-05-29T13:44:13.671126286Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3m0svfa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.671020831Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3m0svfa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.67098233Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=402122 slug=leapwallet version=69 fingerprint=412af9f7aa2ff8f8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.671017196Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.670690409s EvaluationString:}]" duration=20.22022ms + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3m0svfa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.67096992Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-030" t=2024-05-29T13:44:13.671073657Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3m0svfa-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.6709162Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=537072 slug=devbitvavo instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.670963694Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB02:9400, job=WindowsExporterWeb, name=mcman, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.670712775Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-028" t=2024-05-29T13:44:13.670848039Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=537072 slug=devbitvavo t=2024-05-29T13:44:13.670821551Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.670745034Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.67071513Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3ljvkf5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.670687067Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=YDKOxyYMz, ref_id=A" t=2024-05-29T13:44:13.670699378Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=4947 slug=mediamath instance="datasource_uid=YDKOxyYMz, ref_id=A" t=2024-05-29T13:44:13.670685937Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-l3ljvkf5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.670626157Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=bf020d02a287b25a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.670570107Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=YDKOxyYMz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.670302137s EvaluationString:}]" duration=1.259976227s + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3k0vnn5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.670589526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3k0vnn5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.670564886Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.670476128Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Sydney" + level=debug ts=2024-05-29T13:44:13.670161421Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.670258542Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Sydney" + level=debug ts=2024-05-29T13:44:13.67019Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB01:9400, job=WindowsExporterWeb, name=mheartbeater, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.670096594Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-023" t=2024-05-29T13:44:13.670156232Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3ibw3ka-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.670154132Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.670100467Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.670032866Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3fy5y36-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.67002231Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.670003166Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3fy5y36-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.66999779Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.669989466Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=849729 slug=medopsimscare t=2024-05-29T13:44:13.669917319Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.952682ms + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB01:9400, job=WindowsExporterWeb, name=mgentracksvrd, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.66982581Z level=debug msg="Setting next state" 
handler=resultNormal + level=debug ts=2024-05-29T13:44:13.669826536Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.669791448Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Sydney" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3dul73p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669826238Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:13.669797357Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.20735ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3dul73p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669749128Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-020" t=2024-05-29T13:44:13.66970519Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3dul73p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669626806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3datmtw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669552656Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.669573376Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-019" t=2024-05-29T13:44:13.669566185Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-WEB01:9400, job=WindowsExporterWeb, name=mcman, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.669454783Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.669524175Z 
caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.669474753Z caller=remote_instance_store.go:51 user=890273 slug=cmhusqnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.669415214Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-018" t=2024-05-29T13:44:13.669439522Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.669418183Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-018" t=2024-05-29T13:44:13.669423447Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="name=ws-jtwtcue2-cn-wulan-img-gen" t=2024-05-29T13:44:13.669409204Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.669372683Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.669386218Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l3datmtw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669382254Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l372ly90-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669342163Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=838012 slug=lepton version=22 fingerprint=0f184f6270b0d651 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.669245852Z level=debug msg="Alert rule evaluated" results="[{Instance:name=ws-jtwtcue2-cn-wulan-img-gen State:Normal Error: Results:map[] Values:map[all:{Var:all Labels:name=ws-jtwtcue2-cn-wulan-img-gen Value:0xc0818ca070} cond:{Var:cond Labels:name=ws-jtwtcue2-cn-wulan-img-gen Value:0xc0818ca098} errors:{Var:errors Labels:name=ws-jtwtcue2-cn-wulan-img-gen Value:0xc0818ca0b0} success rate:{Var:success rate Labels:name=ws-jtwtcue2-cn-wulan-img-gen Value:0xc0818ca0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.668603745s EvaluationString:[ var='all' labels={name=ws-jtwtcue2-cn-wulan-img-gen} value=4610.20920502092 ], [ var='cond' labels={name=ws-jtwtcue2-cn-wulan-img-gen} value=0 ], [ var='errors' labels={name=ws-jtwtcue2-cn-wulan-img-gen} value=0 ], [ var='success rate' labels={name=ws-jtwtcue2-cn-wulan-img-gen} value=100 ]} {Instance:name=ws-vjigr56z-lepton-ai-us-east-dev State:Normal Error: Results:map[] Values:map[all:{Var:all Labels:name=ws-vjigr56z-lepton-ai-us-east-dev Value:0xc0818ca188} cond:{Var:cond 
Labels:name=ws-vjigr56z-lepton-ai-us-east-dev Value:0xc0818ca108} errors:{Var:errors Labels:name=ws-vjigr56z-lepton-ai-us-east-dev Value:0xc0818ca140} success rate:{Var:success rate Labels:name=ws-vjigr56z-lepton-ai-us-east-dev Value:0xc0818ca170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.668620306s EvaluationString:[ var='all' labels={name=ws-vjigr56z-lepton-ai-us-east-dev} value=135.56485355648536 ], [ var='cond' labels={name=ws-vjigr56z-lepton-ai-us-east-dev} value=0 ], [ var='errors' labels={name=ws-vjigr56z-lepton-ai-us-east-dev} value=0 ], [ var='success rate' labels={name=ws-vjigr56z-lepton-ai-us-east-dev} value=100 ]} {Instance:name=ws-vjigr56z-us-east-training State:Normal Error: Results:map[] Values:map[all:{Var:all Labels:name=ws-vjigr56z-us-east-training Value:0xc0818ca208} cond:{Var:cond Labels:name=ws-vjigr56z-us-east-training Value:0xc0818ca1a8} errors:{Var:errors Labels:name=ws-vjigr56z-us-east-training Value:0xc0818ca1c0} success rate:{Var:success rate Labels:name=ws-vjigr56z-us-east-training Value:0xc0818ca1f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.668631086s EvaluationString:[ var='all' labels={name=ws-vjigr56z-us-east-training} value=388221.589958159 ], [ var='cond' labels={name=ws-vjigr56z-us-east-training} value=0 ], [ var='errors' labels={name=ws-vjigr56z-us-east-training} value=103.43096234309624 ], [ var='success rate' labels={name=ws-vjigr56z-us-east-training} value=99.9733577510838 ]}]" duration=145.159642ms + logger=ngalert.state.manager.persist user=222972 slug=outseer t=2024-05-29T13:44:13.669250074Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.081174ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l372ly90-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669271263Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-017" t=2024-05-29T13:44:13.66927046Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l372ly90-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669207132Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l372ly90-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669180132Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.669065525Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l36eb8zq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669093901Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l36eb8zq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.669068031Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-APP01:9400, job=WindowsExporterApp, name=mlockmand, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.669050277Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-015" t=2024-05-29T13:44:13.669037248Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l36eb8zq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.66901353Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l36eb8zq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.66898797Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-013" t=2024-05-29T13:44:13.668782413Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-012" t=2024-05-29T13:44:13.66866402Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.668638626Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.66863044Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=245291 slug=pismo version=2 fingerprint=fd04da4795d48d8b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.66851346Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.668094876s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=457.805471ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-011" t=2024-05-29T13:44:13.66851267Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-011" t=2024-05-29T13:44:13.66849966Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-010" t=2024-05-29T13:44:13.66836482Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.668722955Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-APP01:9400, job=WindowsExporterApp, name=mheartbeater, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.668800925Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-005" t=2024-05-29T13:44:13.667707101Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l2xdcpix-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.668784038Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l2otfenw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.668725497Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-002" t=2024-05-29T13:44:13.667313066Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=thg-ewr-us-001" t=2024-05-29T13:44:13.667198365Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.668675808Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=shark-abq-us-010" t=2024-05-29T13:44:13.66696254Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=shark-abq-us-009" t=2024-05-29T13:44:13.66685809Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=154996 slug=veovo t=2024-05-29T13:44:13.668561899Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="siteEnvironment=Prod, siteName=Sydney" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=shark-abq-us-002" t=2024-05-29T13:44:13.665910673Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=rnerd-yul-ca-024" t=2024-05-29T13:44:13.66567618Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=rnerd-yul-ca-023" t=2024-05-29T13:44:13.665570095Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=rnerd-yul-ca-023" t=2024-05-29T13:44:13.665561238Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=265585 slug=engageli instance="engageli_stack_name=demo2, pod=ems-scaler-00-575fcd849f-5cd22" t=2024-05-29T13:44:13.665544491Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=265585 slug=engageli t=2024-05-29T13:44:13.665480502Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=rnerd-yul-ca-021" t=2024-05-29T13:44:13.665273071Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=rnerd-yul-ca-020" t=2024-05-29T13:44:13.665107575Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=rnerd-yul-ca-020" t=2024-05-29T13:44:13.665100565Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l2g04tei-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.668462584Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.66562084Z caller=remote_instance_store.go:51 user=265585 slug=engageli msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l2g04tei-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.668386933Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.668226375Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.668284331Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l2cjfl50-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.668172121Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=154996 slug=veovo instance="__name__=windows_service_state, instance=AOS-PROD-APP01:9400, job=WindowsExporterApp, name=mcman, serverName=serverName, siteEnvironment=Prod, siteName=Sydney, state=running" t=2024-05-29T13:44:13.668225387Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.668208764Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-l2cjfl50-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.667991759Z level=debug msg="Setting next state" handler=resultNormal
[… ~250 further added (`+`) log entries of the same shape: Grafana `ngalert.state.manager`, `ngalert.state.manager.persist`, and `ngalert.scheduler` debug/warn lines plus `remote_instance_store.go:51` "calling SaveAlertInstance" and discovery client lines, all timestamped 2024-05-29T13:44:13Z …]
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics,
namespace=kube-prometheus-stack, persistentvolume=ws-kx9yo0fn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.659132658Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kx9yo0fn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.659107838Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kx2hjjpc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.658938956Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kx2euezl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.658716944Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-9d986" t=2024-05-29T13:44:13.658818526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-9d986" t=2024-05-29T13:44:13.658796805Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.658752724Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=quadra-us-sm-14" t=2024-05-29T13:44:13.658719329Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.658644297Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.658602324Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.65850401Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.758265ms + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-jkxj6" t=2024-05-29T13:44:13.658491696Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.658393899Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=quadra-us-sm-08" 
t=2024-05-29T13:44:13.657939176Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=quadra-us-sm-05" t=2024-05-29T13:44:13.657721104Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwym04l3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65827151Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=quadra-us-sm-04" t=2024-05-29T13:44:13.657582331Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=quadra-us-sm-03" t=2024-05-29T13:44:13.657474641Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, namespace=core, pod=descheduler-6bf5f8654d-p52mm" t=2024-05-29T13:44:13.658224388Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwvr40hh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.658159348Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwt2bojh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657981937Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.657937236Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwonb4oh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657820205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwonb4oh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657764934Z level=debug msg="Keeping state" state=Normal + level=debug 
ts=2024-05-29T13:44:13.657730428Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwn0xlik-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657585903Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwn0xlik-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657575502Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwl8ey55-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657459041Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.657353028Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwl8ey55-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65736494Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwl8ey55-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65732609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwkxs9rh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657263289Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-kwkxs9rh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657196999Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-015" t=2024-05-29T13:44:13.656851584Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kwkxs9rh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657132928Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-1" t=2024-05-29T13:44:13.657115023Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-1" t=2024-05-29T13:44:13.657103585Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw8odz18-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657066807Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw8odz18-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.657013487Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.6566771Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw8odz18-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656984936Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.656413464Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw3mwi99-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656852455Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw3mwi99-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656823385Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw3mwi99-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656764094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-1" t=2024-05-29T13:44:13.656697665Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw3c0ttr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656644423Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw3c0ttr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656570792Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw3c0ttr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656542452Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-013" t=2024-05-29T13:44:13.656567954Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-0" t=2024-05-29T13:44:13.656517383Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw3c0ttr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656476301Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-012" t=2024-05-29T13:44:13.656449451Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.65637477Z caller=remote_instance_store.go:51 user=795224 slug=gannettdigital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-012" t=2024-05-29T13:44:13.656438369Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw1grfw9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656433521Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw1grfw9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65637169Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kw1grfw9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65634309Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=222972 slug=outseer t=2024-05-29T13:44:13.656159934Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.656220339Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvws412g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656208018Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvws412g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656137928Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvws412g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656097217Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.656027934Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvnlwlbs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.656029387Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvnlwlbs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655948456Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvnlwlbs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655877165Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvnlwlbs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655846755Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:13.655826184Z level=debug msg="Saving alert states done" count=7 max_state_save_concurrency=1 duration=128.82663ms + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=sonarqube, namespace=sonarqube, pod=sonarqube-sonarqube-c94ccdb69-mg8g6" t=2024-05-29T13:44:13.655828328Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.655825701Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvi8ljbr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655807074Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.655740509Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-006" t=2024-05-29T13:44:13.655512667Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvi8ljbr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655714563Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-005" t=2024-05-29T13:44:13.655322459Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-004" t=2024-05-29T13:44:13.655181914Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvi506kg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655622652Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.655634647Z caller=remote_instance_store.go:51 user=344017 slug=descript msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.655592909Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:13.655587109Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.655529599Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:13.65551948Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.280732ms + level=debug ts=2024-05-29T13:44:13.655239296Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.655306506Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5" t=2024-05-29T13:44:13.655320473Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=s3-proxy, 
namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5" t=2024-05-29T13:44:13.655306547Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.655262465Z caller=remote_instance_store.go:51 user=741823 slug=sudoops msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=741823 slug=sudoops t=2024-05-29T13:44:13.655222444Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=741823 slug=sudoops instance= t=2024-05-29T13:44:13.655210164Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=741823 slug=sudoops instance= t=2024-05-29T13:44:13.655198284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvcxrojn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655210568Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql" t=2024-05-29T13:44:13.655196877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kvcxrojn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655169858Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=741823 slug=sudoops version=32 fingerprint=badab9d1fb07840d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.655070222Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.65467196s EvaluationString:}]" duration=438.244389ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kv889v78-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.655053637Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.655037959Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.655014518Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:13.654981401Z level=debug msg="Saving alert states" count=22 max_state_save_concurrency=1 + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=repo-server, namespace=gitops, 
pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j" t=2024-05-29T13:44:13.654934702Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-002" t=2024-05-29T13:44:13.654920345Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j" t=2024-05-29T13:44:13.654915483Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-icn-kr-002" t=2024-05-29T13:44:13.654861444Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kv4erwgi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.654729013Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-28wp6" t=2024-05-29T13:44:13.654627253Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.654267945Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.654545459Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=504368 slug=libremfg + level=debug ts=2024-05-29T13:44:13.654520173Z caller=ruler.go:522 msg="tenant is owned by this instance" user=504368 slug=libremfg groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kv43e9me-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.654479081Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-bcn-es-021" t=2024-05-29T13:44:13.654454761Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.654420503Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-tr28z" t=2024-05-29T13:44:13.654373681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=cpbi8sq74ocl436p6ne0" t=2024-05-29T13:44:13.654413297Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=oakland, construction_number=oak-028, device=computer-side-2, host_name=oak-028-2" t=2024-05-29T13:44:13.654262985Z level=warn msg="Failed to take an image" dashboard=do3-KLMIz panel=5 error="rpc error: code = Code(422) desc = screenshots unavailable" + 
level=info ts=2024-05-29T13:44:13.654218726Z caller=remote_image_capturer.go:61 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-9d986" t=2024-05-29T13:44:13.654233215Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:13.654238949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-9d986" t=2024-05-29T13:44:13.654215261Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=cpbi1lv37uvmolr3ja50" t=2024-05-29T13:44:13.654227662Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.65402876Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kux1w8c5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.654094247Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-2" t=2024-05-29T13:44:13.653963539Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kux1w8c5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.654027596Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kut2m2x7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.653989656Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kut2m2x7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.653962575Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=cpbhoai74ocl436p6je0" t=2024-05-29T13:44:13.654011655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kurr374h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.653744743Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=cliqvijruq7ct96cg040" t=2024-05-29T13:44:13.65387173Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kurr374h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.653583981Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kuh5nff1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.653537531Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.653811919Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kuh5nff1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65344792Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kuaskz0h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.653307939Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kuaskz0h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.653174117Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kuaskz0h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.653134557Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-1" t=2024-05-29T13:44:13.653783891Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ku472ybz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.652947605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-bcn-es-017" t=2024-05-29T13:44:13.653748737Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=752743 slug=andreydmitr20 t=2024-05-29T13:44:13.653697899Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.268514ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ktwr1bhl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.652636182Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ktuq7fdk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.652599701Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ktr7ct57-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.6524488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ktr7ct57-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65243572Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ktr7ct57-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.652387299Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:13.653596961Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=ciep3ie5utor16caq7ig" t=2024-05-29T13:44:13.653583821Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kt7yvok5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.652167917Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="redpanda_id=ciddv0m5utor16capvn0" t=2024-05-29T13:44:13.653512177Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-2" t=2024-05-29T13:44:13.653492159Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kt7in050-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.652038006Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-2" t=2024-05-29T13:44:13.653476754Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=oakland, construction_number=oak-028, device=computer-side-2, host_name=oak-028-2" t=2024-05-29T13:44:13.653436083Z level=debug msg="Setting next state" handler=resultAlerting + level=info component=discovery ts=2024-05-29T13:44:13.653376744Z caller=client.go:80 msg="creating client for grafana instance" user=321642 addr=dns:///netmakers-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-kt1u5ecj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651897094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:13.653363503Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksz4abhx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651759073Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksz4abhx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651624681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksz4abhx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651613031Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:13.653298845Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-bcn-es-015" t=2024-05-29T13:44:13.653333939Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksoipx3n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65153124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=183214 slug=vectorizedio version=1 fingerprint=0eead7081a1bcea6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.653104752Z level=debug msg="Alert rule evaluated" results="[{Instance:redpanda_id=ci0c2f8k30vsi89l4v1g State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=ci0c2f8k30vsi89l4v1g Value:0xc0d81f2680} C:{Var:C Labels:redpanda_id=ci0c2f8k30vsi89l4v1g Value:0xc0d81f2688}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652511856s EvaluationString:[ var='B' labels={redpanda_id=ci0c2f8k30vsi89l4v1g} value=0 ], [ var='C' labels={redpanda_id=ci0c2f8k30vsi89l4v1g} value=0 ]} {Instance:redpanda_id=ciddv0m5utor16capvn0 State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:redpanda_id=ciddv0m5utor16capvn0 Value:0xc0d81f26e0} C:{Var:C Labels:redpanda_id=ciddv0m5utor16capvn0 Value:0xc0d81f26b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652521277s EvaluationString:[ var='B' labels={redpanda_id=ciddv0m5utor16capvn0} value=0 ], [ var='C' labels={redpanda_id=ciddv0m5utor16capvn0} value=0 ]} {Instance:redpanda_id=ciep3ie5utor16caq7ig State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=ciep3ie5utor16caq7ig Value:0xc0d81f2718} C:{Var:C Labels:redpanda_id=ciep3ie5utor16caq7ig Value:0xc0d81f2710}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652527065s EvaluationString:[ var='B' labels={redpanda_id=ciep3ie5utor16caq7ig} value=0 ], [ var='C' labels={redpanda_id=ciep3ie5utor16caq7ig} value=0 ]} {Instance:redpanda_id=cjk99o0a3v32vef0rqtg State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cjk99o0a3v32vef0rqtg Value:0xc0d81f2768} C:{Var:C Labels:redpanda_id=cjk99o0a3v32vef0rqtg Value:0xc0d81f2790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652532711s EvaluationString:[ var='B' labels={redpanda_id=cjk99o0a3v32vef0rqtg} value=0 ], [ var='C' labels={redpanda_id=cjk99o0a3v32vef0rqtg} value=0 ]} {Instance:redpanda_id=cliqutrruq7ct96cg01g State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cliqutrruq7ct96cg01g Value:0xc0d81f27d0} C:{Var:C Labels:redpanda_id=cliqutrruq7ct96cg01g Value:0xc0d81f27d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652536584s EvaluationString:[ var='B' labels={redpanda_id=cliqutrruq7ct96cg01g} value=0 ], [ var='C' labels={redpanda_id=cliqutrruq7ct96cg01g} value=0 ]} {Instance:redpanda_id=cliqvijruq7ct96cg040 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cliqvijruq7ct96cg040 Value:0xc0d81f28a0} C:{Var:C Labels:redpanda_id=cliqvijruq7ct96cg040 Value:0xc0d81f2818}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652540346s EvaluationString:[ var='B' labels={redpanda_id=cliqvijruq7ct96cg040} value=0 ], [ var='C' labels={redpanda_id=cliqvijruq7ct96cg040} value=0 ]} {Instance:redpanda_id=cn6b1srsj6j1mdqdhil0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cn6b1srsj6j1mdqdhil0 Value:0xc0d81f29a0} C:{Var:C Labels:redpanda_id=cn6b1srsj6j1mdqdhil0 Value:0xc0d81f29a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652543897s EvaluationString:[ var='B' labels={redpanda_id=cn6b1srsj6j1mdqdhil0} value=0 ], [ var='C' labels={redpanda_id=cn6b1srsj6j1mdqdhil0} value=0 ]} {Instance:redpanda_id=cpbhoai74ocl436p6je0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbhoai74ocl436p6je0 Value:0xc0d81f2a08} C:{Var:C Labels:redpanda_id=cpbhoai74ocl436p6je0 Value:0xc0d81f2a30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652547607s EvaluationString:[ var='B' labels={redpanda_id=cpbhoai74ocl436p6je0} value=0 ], [ var='C' labels={redpanda_id=cpbhoai74ocl436p6je0} value=0 ]} {Instance:redpanda_id=cpbhvv274ocl436p6lb0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbhvv274ocl436p6lb0 Value:0xc0d81f2a60} C:{Var:C Labels:redpanda_id=cpbhvv274ocl436p6lb0 Value:0xc0d81f2a68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652551583s EvaluationString:[ var='B' labels={redpanda_id=cpbhvv274ocl436p6lb0} value=0 ], [ var='C' labels={redpanda_id=cpbhvv274ocl436p6lb0} value=0 ]} {Instance:redpanda_id=cpbi0fq74ocl436p6lfg State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:redpanda_id=cpbi0fq74ocl436p6lfg Value:0xc0d81f2a98} C:{Var:C Labels:redpanda_id=cpbi0fq74ocl436p6lfg Value:0xc0d81f2ac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652555405s EvaluationString:[ var='B' labels={redpanda_id=cpbi0fq74ocl436p6lfg} value=0 ], [ var='C' labels={redpanda_id=cpbi0fq74ocl436p6lfg} value=0 ]} {Instance:redpanda_id=cpbi1lv37uvmolr3ja50 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbi1lv37uvmolr3ja50 Value:0xc0d81f2b90} C:{Var:C Labels:redpanda_id=cpbi1lv37uvmolr3ja50 Value:0xc0d81f2b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652561664s EvaluationString:[ var='B' labels={redpanda_id=cpbi1lv37uvmolr3ja50} value=0 ], [ var='C' labels={redpanda_id=cpbi1lv37uvmolr3ja50} value=0 ]} {Instance:redpanda_id=cpbi30274ocl436p6ma0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbi30274ocl436p6ma0 Value:0xc0d81f2bd8} C:{Var:C Labels:redpanda_id=cpbi30274ocl436p6ma0 Value:0xc0d81f2c10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652565312s EvaluationString:[ var='B' labels={redpanda_id=cpbi30274ocl436p6ma0} value=0 ], [ var='C' labels={redpanda_id=cpbi30274ocl436p6ma0} value=0 ]} {Instance:redpanda_id=cpbi43f37uvmolr3jab0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbi43f37uvmolr3jab0 Value:0xc0d81f2ce0} C:{Var:C Labels:redpanda_id=cpbi43f37uvmolr3jab0 Value:0xc0d81f2ce8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652569158s EvaluationString:[ var='B' labels={redpanda_id=cpbi43f37uvmolr3jab0} value=0 ], [ var='C' labels={redpanda_id=cpbi43f37uvmolr3jab0} value=0 ]} {Instance:redpanda_id=cpbi8sq74ocl436p6ne0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbi8sq74ocl436p6ne0 Value:0xc0d81f2d38} C:{Var:C Labels:redpanda_id=cpbi8sq74ocl436p6ne0 Value:0xc0d81f2db0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652572668s EvaluationString:[ var='B' labels={redpanda_id=cpbi8sq74ocl436p6ne0} value=0 ], [ var='C' labels={redpanda_id=cpbi8sq74ocl436p6ne0} value=0 ]} {Instance:redpanda_id=cpbiaif37uvmolr3jal0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbiaif37uvmolr3jal0 Value:0xc0d81f2e80} C:{Var:C Labels:redpanda_id=cpbiaif37uvmolr3jal0 Value:0xc0d81f2e88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652576248s EvaluationString:[ var='B' labels={redpanda_id=cpbiaif37uvmolr3jal0} value=0 ], [ var='C' labels={redpanda_id=cpbiaif37uvmolr3jal0} value=0 ]} {Instance:redpanda_id=cpbiao737uvmolr3jan0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbiao737uvmolr3jan0 Value:0xc0d81f2eb8} C:{Var:C Labels:redpanda_id=cpbiao737uvmolr3jan0 Value:0xc0d81f3060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.65257951s EvaluationString:[ var='B' labels={redpanda_id=cpbiao737uvmolr3jan0} value=0 ], [ var='C' labels={redpanda_id=cpbiao737uvmolr3jan0} value=0 ]} {Instance:redpanda_id=cpbiara74ocl436p6nv0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbiara74ocl436p6nv0 Value:0xc0d81f3090} C:{Var:C Labels:redpanda_id=cpbiara74ocl436p6nv0 Value:0xc0d81f3098}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652583141s EvaluationString:[ var='B' labels={redpanda_id=cpbiara74ocl436p6nv0} value=0 ], [ var='C' labels={redpanda_id=cpbiara74ocl436p6nv0} value=0 ]} {Instance:redpanda_id=cpbibci74ocl436p6o30 State:Normal Error: Results:map[] Values:map[B:{Var:B 
Labels:redpanda_id=cpbibci74ocl436p6o30 Value:0xc0d81f30c8} C:{Var:C Labels:redpanda_id=cpbibci74ocl436p6o30 Value:0xc0d81f30f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.65258674s EvaluationString:[ var='B' labels={redpanda_id=cpbibci74ocl436p6o30} value=0.005076883333333333 ], [ var='C' labels={redpanda_id=cpbibci74ocl436p6o30} value=0 ]} {Instance:redpanda_id=cpbidrn37uvmolr3jb0g State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbidrn37uvmolr3jb0g Value:0xc0d81f3120} C:{Var:C Labels:redpanda_id=cpbidrn37uvmolr3jb0g Value:0xc0d81f3128}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.65259218s EvaluationString:[ var='B' labels={redpanda_id=cpbidrn37uvmolr3jb0g} value=0 ], [ var='C' labels={redpanda_id=cpbidrn37uvmolr3jb0g} value=0 ]} {Instance:redpanda_id=cpbiee737uvmolr3jb1g State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbiee737uvmolr3jb1g Value:0xc0d81f3158} C:{Var:C Labels:redpanda_id=cpbiee737uvmolr3jb1g Value:0xc0d81f3220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652597793s EvaluationString:[ var='B' labels={redpanda_id=cpbiee737uvmolr3jb1g} value=0 ], [ var='C' labels={redpanda_id=cpbiee737uvmolr3jb1g} value=0 ]} {Instance:redpanda_id=cpbifef37uvmolr3jb50 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbifef37uvmolr3jb50 Value:0xc0d81f3268} C:{Var:C Labels:redpanda_id=cpbifef37uvmolr3jb50 Value:0xc0d81f3260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652601357s EvaluationString:[ var='B' labels={redpanda_id=cpbifef37uvmolr3jb50} value=0 ], [ var='C' labels={redpanda_id=cpbifef37uvmolr3jb50} value=0 ]} {Instance:redpanda_id=cpbigqf37uvmolr3jb7g State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:redpanda_id=cpbigqf37uvmolr3jb7g Value:0xc0d81f3380} C:{Var:C Labels:redpanda_id=cpbigqf37uvmolr3jb7g Value:0xc0d81f3298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.652605555s EvaluationString:[ var='B' labels={redpanda_id=cpbigqf37uvmolr3jb7g} value=0 ], [ var='C' labels={redpanda_id=cpbigqf37uvmolr3jb7g} value=0 ]}]" duration=43.612138ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksldbwzq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651414569Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksldbwzq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651376369Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-0" t=2024-05-29T13:44:13.653223935Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksldbwzq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651302958Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-0" t=2024-05-29T13:44:13.653209715Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksktew0d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651208597Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksktew0d-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.651197717Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-2" t=2024-05-29T13:44:13.65308709Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=psychz-bcn-es-013" t=2024-05-29T13:44:13.653069448Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kshxkytk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.650838513Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kshxkytk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.650672592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-1" t=2024-05-29T13:44:13.652942723Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.651243917Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=packex-rix-lv-002" 
t=2024-05-29T13:44:13.652713242Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=253959 slug=roic instance= t=2024-05-29T13:44:13.652717333Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=nginx, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd" t=2024-05-29T13:44:13.652677906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=houston, construction_number=hou-048, device=computer-side-2, host_name=hou-048-2" t=2024-05-29T13:44:13.652482563Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=nginx, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd" t=2024-05-29T13:44:13.652663146Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=155740 slug=routific instance="instance=map-service-6cdb559998-thwwd" t=2024-05-29T13:44:13.652665991Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=155740 slug=routific instance="instance=map-service-6cdb559998-n4bn2" t=2024-05-29T13:44:13.652633766Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=packex-rix-lv-001" t=2024-05-29T13:44:13.652587332Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=155740 slug=routific instance="instance=map-service-6cdb559998-kh2zf" t=2024-05-29T13:44:13.652589936Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=155740 slug=routific instance="instance=map-service-6cdb559998-9cqmw" t=2024-05-29T13:44:13.652557349Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.652408589Z caller=remote_image_capturer.go:61 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=meteo-server, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5" t=2024-05-29T13:44:13.652511993Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=packex-fin-05" t=2024-05-29T13:44:13.652443495Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=packex-fin-03" t=2024-05-29T13:44:13.652166488Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=packex-fin-03" t=2024-05-29T13:44:13.652153143Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.651988099Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=jenkins-master, namespace=jenkins, pod=jenkins-kropyva" t=2024-05-29T13:44:13.651930443Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=packex-fin-01" t=2024-05-29T13:44:13.651873796Z level=debug msg="Keeping 
state" state=Normal + level=debug ts=2024-05-29T13:44:13.651795422Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=116479 slug=tomtomnv instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:13.651723132Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.65178251Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.651818749Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=320274 slug=monarchares + logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:13.651837759Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=116479 slug=tomtomnv instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:13.651685548Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.651698901Z caller=remote_image_capturer.go:54 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=one-vno-lt-002" t=2024-05-29T13:44:13.651693304Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.651666081Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.651607958Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.651633697Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=669226 slug=mailneurodesk + level=debug ts=2024-05-29T13:44:13.651572818Z caller=ruler.go:522 msg="tenant is owned by this instance" user=669226 slug=mailneurodesk groups=0 + level=debug ts=2024-05-29T13:44:13.651498017Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.651535569Z caller=remote_image_capturer.go:61 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=one-vanc-5" t=2024-05-29T13:44:13.651469391Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.651332744Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-prod, pod=hemp-filemanagement-7d87bbb9f7-bgfcc" t=2024-05-29T13:44:13.651315616Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=126872 slug=sensoper instance="datasource_uid=f0ZX1i07z, ref_id=A" t=2024-05-29T13:44:13.651195693Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql" t=2024-05-29T13:44:13.651062924Z level=debug msg="Setting next 
state" handler=resultNormal + logger=ngalert.state.manager user=849729 slug=medopsimscare instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.650942627Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.650914079Z caller=client.go:80 msg="creating client for grafana instance" user=672169 addr=dns:///myobtest-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.650858857Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=607188 slug=lpsydb + logger=ngalert.state.manager user=233137 slug=mirrornode t=2024-05-29T13:44:13.650906762Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.650830343Z caller=ruler.go:522 msg="tenant is owned by this instance" user=607188 slug=lpsydb groups=0 + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-zr99b" t=2024-05-29T13:44:13.650790021Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.65079801Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-syd-au-003" t=2024-05-29T13:44:13.650799112Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.650692843Z caller=remote_image_capturer.go:61 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-syd-au-002" t=2024-05-29T13:44:13.650668151Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.650641925Z caller=remote_instance_store.go:51 user=751407 slug=nethermindjuno msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksh79gar-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.650616821Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksh79gar-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.650602601Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.650535772Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-ksh79gar-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65052582Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.650557629Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.650547872Z caller=ruler.go:522 msg="tenant is owned by this instance" user=557719 slug=mariahernandez groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-syd-au-001" t=2024-05-29T13:44:13.650524172Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksh79gar-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.65051284Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-stage, pod=hemp-admin-fc7ff9bf5-mtdjn" t=2024-05-29T13:44:13.650525175Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-022" t=2024-05-29T13:44:13.650399359Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-022" t=2024-05-29T13:44:13.650373912Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-wfs7b" t=2024-05-29T13:44:13.650330714Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ksfedfm8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.650300688Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.650246277Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=692010 slug=mercariusprod instance="datasource_uid=gfds-prometheus-wrapper, ref_id=Burn rate" t=2024-05-29T13:44:13.650260894Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.650212339Z caller=ruler.go:522 msg="tenant is owned by this instance" user=412160 slug=lpmeinfra groups=0 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ks3okxm6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.650245437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ks3okxm6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.650217197Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-8kz75" t=2024-05-29T13:44:13.650206179Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ks1fnbdd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649978434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kryjdkw0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649836303Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kryjdkw0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649823013Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kryjdkw0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649786002Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.650068049Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.650068745Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=522360 slug=me110 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-krqcabt2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.649680211Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-020" t=2024-05-29T13:44:13.650075945Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-krqcabt2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649611031Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.649859508Z caller=client.go:80 msg="creating client for grafana instance" user=745320 addr=dns:///mostafasherif-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-krqcabt2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.64956437Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-krosp2qn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.64951582Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=177683 slug=ubitricityprod instance="datasource_uid=grafanacloud-prom, ref_id=A,B,C" t=2024-05-29T13:44:13.649936195Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-krj6qhuw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649381218Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=177683 slug=ubitricityprod instance="datasource_uid=grafanacloud-prom, ref_id=A,B,C" t=2024-05-29T13:44:13.649910396Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.649934215Z caller=remote_image_capturer.go:54 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-krj6qhuw-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649247557Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=177683 slug=ubitricityprod version=16 fingerprint=e3d9d821cfa8789a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.649779589Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A,B,C State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.649421972s EvaluationString:}]" duration=37.306931ms + level=warn ts=2024-05-29T13:44:13.649826122Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=514503 slug=mauro + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-018" t=2024-05-29T13:44:13.649813893Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=314947 slug=h10n t=2024-05-29T13:44:13.649767286Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=99.679876ms + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=houston, construction_number=hou-025, device=computer-side-1, host_name=hou-025-1" t=2024-05-29T13:44:13.649807549Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=experience-columbus, construction_number=exp-024, device=computer-side-2, host_name=exp-024-2" t=2024-05-29T13:44:13.649773287Z level=debug msg="Changing state" previous_state=Normal next_state=Pending previous_ends_at=2024-05-29T13:44:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=784151 slug=bmsbv instance= t=2024-05-29T13:44:13.64974876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7" t=2024-05-29T13:44:13.649745221Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=784151 slug=bmsbv instance= t=2024-05-29T13:44:13.64973421Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.649687743Z caller=remote_image_capturer.go:61 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-zxxkx" t=2024-05-29T13:44:13.649623846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-spq7v" t=2024-05-29T13:44:13.649542388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-016" t=2024-05-29T13:44:13.649568685Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-016" t=2024-05-29T13:44:13.649556984Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=falco, namespace=falco, 
pod=falco-n4pfb" t=2024-05-29T13:44:13.649420763Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kr9xi7qa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649202136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kr9xi7qa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.649130926Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.649207095Z caller=remote_instance_store.go:51 user=26909 slug=designcrowd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kr7dstqe-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.648988094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kr7dstqe-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.648921773Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.64897142Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.17953ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-012" t=2024-05-29T13:44:13.649019585Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-012" t=2024-05-29T13:44:13.6490078Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-011" t=2024-05-29T13:44:13.64891371Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.64884645Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.648817708Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.648786869Z caller=remote_image_capturer.go:54 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=260796 
slug=expressvpn instance="host=maxi-mia-us-010" t=2024-05-29T13:44:13.648760681Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kr5inssx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.648749232Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.648770732Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4w8p9" t=2024-05-29T13:44:13.648679893Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4w8p9" t=2024-05-29T13:44:13.648665604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-009" t=2024-05-29T13:44:13.648618575Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mia-us-009" t=2024-05-29T13:44:13.648609828Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kr4reu0v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.64860185Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4hgdc" t=2024-05-29T13:44:13.648558848Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kr4reu0v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.648475669Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-jkxj6" t=2024-05-29T13:44:13.648332319Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-jkxj6" t=2024-05-29T13:44:13.648315141Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-kr2fjzmv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.648194546Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-5bhq9" t=2024-05-29T13:44:13.648189595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-5bhq9" t=2024-05-29T13:44:13.648169103Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mex-mx-004" t=2024-05-29T13:44:13.648150031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mex-mx-004" t=2024-05-29T13:44:13.648136092Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mex-mx-003" t=2024-05-29T13:44:13.648016892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-mex-mx-002" t=2024-05-29T13:44:13.64791266Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.647788062Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=dependency-track-frontend, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5" t=2024-05-29T13:44:13.647767938Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=cleveland, construction_number=cle-142, device=computer-side-1, host_name=cle-142-1" t=2024-05-29T13:44:13.647732198Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.64771167Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=cleveland, construction_number=cle-142, device=computer-side-1, host_name=cle-142-1" t=2024-05-29T13:44:13.647719204Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.647675797Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.664917ms + logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=dependency-track-apiserver, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx" t=2024-05-29T13:44:13.647637523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqzjosco-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647678431Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=baltimore, construction_number=bal-003, device=computer-side-2, host_name=bal-003-2" t=2024-05-29T13:44:13.647675973Z level=warn msg="Failed to take an image" dashboard=do3-KLMIz panel=5 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-lax-us-008" t=2024-05-29T13:44:13.647641348Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqzdtqf5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647564199Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-lax-us-008" t=2024-05-29T13:44:13.647626992Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqzdtqf5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647532209Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=852841 slug=agrivolt instance= t=2024-05-29T13:44:13.647539693Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.647561186Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqzdtqf5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647485139Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=55491 slug=demandbase t=2024-05-29T13:44:13.64745458Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.025764ms + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.647453725Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqz05agr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647325697Z level=debug msg="Setting next state" 
handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqz05agr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647259026Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-7wqgh" t=2024-05-29T13:44:13.647154897Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-7wqgh" t=2024-05-29T13:44:13.647133965Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.647204147Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.647252842Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.647153843Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.647157253Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.647119812Z caller=remote_instance_store.go:51 user=115097 slug=controlplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqyu132n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647109705Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqyu132n-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647094385Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqyu132n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.647020374Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.647033673Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.646881037Z caller=remote_instance_store.go:51 user=463523 slug=porchatto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqxjwnvv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.646951573Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-dfxw4" t=2024-05-29T13:44:13.646947815Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=463523 slug=porchatto instance= t=2024-05-29T13:44:13.646818403Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:13.646966764Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=489921 slug=statuscake instance= t=2024-05-29T13:44:13.646939229Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-cdxb2" t=2024-05-29T13:44:13.646758241Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=489921 slug=statuscake version=82 fingerprint=6237af150a64dc2b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.646821842Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Last:{Var:Last Labels: Value:0xc01e2bd0e8} Value:{Var:Value Labels: Value:0xc01e2bd100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.646498526s EvaluationString:[ var='Last' labels={} value=0 ], [ var='Value' labels={} value=0 ]}]" duration=5.722846ms
+ level=debug ts=2024-05-29T13:44:13.646872622Z caller=remote_image_capturer.go:54 user=765907 slug=orangebarrelmedia rule_org_id=1 rule_uid=a87dee58-f12b-467a-bbb1-b9f04c85c790 dashboard=do3-KLMIz panel=5 msg="rendering alert image with grafana"
+ logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:13.646755472Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.959551ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqxjwnvv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.646716771Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=765907 slug=orangebarrelmedia instance="account=baltimore, construction_number=bal-003, device=computer-side-2, host_name=bal-003-2" t=2024-05-29T13:44:13.64674556Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=765907 slug=orangebarrelmedia t=2024-05-29T13:44:13.646712702Z level=debug msg="State manager processing evaluation results" resultCount=9
+ level=debug ts=2024-05-29T13:44:13.646665564Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-lax-us-001" t=2024-05-29T13:44:13.646680999Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.646655807Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.646458411Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqv095a5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.646404137Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=449554 slug=metricgamingppe t=2024-05-29T13:44:13.646333525Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=141.690059ms
+ level=debug ts=2024-05-29T13:44:13.646345894Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:13.646281027Z caller=client.go:80 msg="creating client for grafana instance" user=527488 addr=dns:///monkeybrains6-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:13.646246912Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=323226 slug=leewestern
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqv095a5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.646283466Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws" t=2024-05-29T13:44:13.646167215Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqu8udam-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.646217785Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-hnd-jp-006" t=2024-05-29T13:44:13.646166858Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-hnd-jp-006" t=2024-05-29T13:44:13.646156136Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqu8udam-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.646072974Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqu8udam-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.646048114Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.645976783Z caller=client.go:80 msg="creating client for grafana instance" user=553140 addr=dns:///mikeh2-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.646019851Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqneuemc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645979443Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-2" t=2024-05-29T13:44:13.645845325Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-hnd-jp-004" t=2024-05-29T13:44:13.64594113Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-hnd-jp-004" t=2024-05-29T13:44:13.645931051Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-ve-006" t=2024-05-29T13:44:13.645829678Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqjax2o6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645755241Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-ve-005" t=2024-05-29T13:44:13.645691356Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-1" t=2024-05-29T13:44:13.645683034Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqjax2o6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645607909Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqe1eyic-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645539719Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-uy-006" t=2024-05-29T13:44:13.645564527Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-0" t=2024-05-29T13:44:13.64554531Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqe1eyic-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645468188Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-uy-005" t=2024-05-29T13:44:13.645445567Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j" t=2024-05-29T13:44:13.645398943Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.645359855Z caller=ruler.go:522 msg="tenant is owned by this instance" user=348969 slug=huseinzol05 groups=5
+ level=debug ts=2024-05-29T13:44:13.645332872Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-pe-006" t=2024-05-29T13:44:13.645297179Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqa12no0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645282476Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kqa12no0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645207725Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-pe-005" t=2024-05-29T13:44:13.645188954Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.645186215Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=1b451890d49cedef attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.645073184Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.644824122s EvaluationString:}]" duration=225.537028ms
+ logger=ngalert.state.manager.persist user=328778 slug=teemuskog t=2024-05-29T13:44:13.645033537Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kq9k33y9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.645031143Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-pa-005" t=2024-05-29T13:44:13.644891493Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.644764115Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kq2yy99u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.644780251Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kq2yy99u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.64472548Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk" t=2024-05-29T13:44:13.64468025Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-gt-005" t=2024-05-29T13:44:13.644667968Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-gt-005" t=2024-05-29T13:44:13.64465815Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kq09tkgz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.644605479Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kq09tkgz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.644538338Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kq09tkgz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.644482058Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-2" t=2024-05-29T13:44:13.64447543Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-ec-006" t=2024-05-29T13:44:13.644540527Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpx4ft7k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.644425907Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.644507423Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.64428867Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-co-009" t=2024-05-29T13:44:13.644280517Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-co-009" t=2024-05-29T13:44:13.644267805Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpuxuh8v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.644205145Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.64425723Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-1" t=2024-05-29T13:44:13.644224795Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:13.644224022Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-co-008" t=2024-05-29T13:44:13.644168153Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-0" t=2024-05-29T13:44:13.644049757Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpoehzzv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.644072994Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-co-007" t=2024-05-29T13:44:13.644060992Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-co-007" t=2024-05-29T13:44:13.644050822Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-cl-006" t=2024-05-29T13:44:13.643945829Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.643895747Z caller=remote_image_capturer.go:54 user=328778 slug=teemuskog rule_org_id=1 rule_uid=yqZG3mynz dashboard=ZkYEDgsnk panel=6 msg="rendering alert image with grafana"
+ level=debug ts=2024-05-29T13:44:13.643803403Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpo8lomz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.64375332Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.643718193Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.643743025Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=328778 slug=teemuskog instance= t=2024-05-29T13:44:13.643696379Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=backup, namespace=jenkins, pod=jenkins-kropyva" t=2024-05-29T13:44:13.643676922Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:13.643721725Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpo8lomz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.643678199Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpo8lomz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.643645889Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=471861 slug=planetstaging version=1 fingerprint=1f60df7d1cc34ade attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.643623376Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc01e5743b0} C:{Var:C Labels: Value:0xc01e5743b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.643311842s EvaluationString:[ var='B' labels={} value=NaN ], [ var='C' labels={} value=0 ]}]" duration=45.244641ms
+ level=debug ts=2024-05-29T13:44:13.643505674Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=444725 slug=devnextgen t=2024-05-29T13:44:13.643493998Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpmp4vab-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.643473047Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpl9m10x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.643435157Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=444725 slug=devnextgen instance= t=2024-05-29T13:44:13.643451667Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.64342341Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v" t=2024-05-29T13:44:13.643513817Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v" t=2024-05-29T13:44:13.643448402Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc" t=2024-05-29T13:44:13.643280187Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc" t=2024-05-29T13:44:13.643260446Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpl9m10x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.643254435Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-br-019" t=2024-05-29T13:44:13.643270351Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.643142802Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.64312989Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:13.643108455Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=info component=discovery ts=2024-05-29T13:44:13.643024737Z caller=client.go:80 msg="creating client for grafana instance" user=734220 addr=dns:///mediacentral-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-br-018" t=2024-05-29T13:44:13.643081817Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpdwdvjq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.642981312Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf" t=2024-05-29T13:44:13.642970745Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.642927602Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.642963735Z caller=ruler.go:522 msg="tenant is owned by this instance" user=530891 slug=gravitas groups=0
+ level=warn ts=2024-05-29T13:44:13.642921703Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=705779 slug=kkgarnet
+ level=debug ts=2024-05-29T13:44:13.642886273Z caller=ruler.go:522 msg="tenant is owned by this instance" user=705779 slug=kkgarnet groups=0
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-bo-006" t=2024-05-29T13:44:13.642810431Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=349229 slug=kropyva instance="cluster=core/grafana-agent, container=apk-store, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9" t=2024-05-29T13:44:13.642732061Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-bo-005" t=2024-05-29T13:44:13.64269936Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kpbyxfl1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.642549878Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.642456625Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kp7cq0vk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.642404116Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.642478275Z caller=remote_instance_store.go:51 user=75789 slug=mysign msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-gru-ar-008" t=2024-05-29T13:44:13.642459484Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=314067 slug=itsme instance= t=2024-05-29T13:44:13.642428397Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=314067 slug=itsme t=2024-05-29T13:44:13.642383482Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=752743 slug=andreydmitr20 instance= t=2024-05-29T13:44:13.642415153Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kp7cq0vk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.642345816Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=752743 slug=andreydmitr20 t=2024-05-29T13:44:13.642369462Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=314067 slug=itsme version=4 fingerprint=ac290e00f016c0eb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.642277127Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.641931251s EvaluationString:}]" duration=13.378489ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kp7cq0vk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.642270905Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kp5n968w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.642226035Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=75789 slug=mysign instance= t=2024-05-29T13:44:13.642224678Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kp5n968w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.642210424Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.642027316Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-035" t=2024-05-29T13:44:13.642053305Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-035" t=2024-05-29T13:44:13.642039279Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.641217164Z caller=client.go:80 msg="creating client for grafana instance" user=745187 addr=dns:///maninweb3-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="LoadBalancer=app/beta-backend-lb/44a8c2667a89a576, TargetGroup=targetgroup/beta-partner-service-tg/796e7c05bcf06e11" t=2024-05-29T13:44:13.641924976Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=warn ts=2024-05-29T13:44:13.641815361Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=368335 slug=kcroucamp
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="LoadBalancer=app/beta-backend-lb/44a8c2667a89a576, TargetGroup=targetgroup/beta-partner-service-tg/796e7c05bcf06e11" t=2024-05-29T13:44:13.641889321Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.640726069Z caller=ruler.go:522 msg="tenant is owned by this instance" user=368335 slug=kcroucamp groups=0
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-034" t=2024-05-29T13:44:13.641902295Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.641849959Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=349229 slug=kropyva version=6 fingerprint=b3ff32ef3c5a4777 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.638252881Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=core/grafana-agent, container=activation-server, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=activation-server, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td Value:0xc03c3bab78} B:{Var:B Labels:cluster=core/grafana-agent, container=activation-server, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td Value:0xc03c3babc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631390098s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=activation-server, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td} value=0.029252925292529253 ], [ var='B' labels={cluster=core/grafana-agent, container=activation-server, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td} value=0 ]} {Instance:cluster=core/grafana-agent, container=activation-server, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=activation-server, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s Value:0xc03c3bac50} B:{Var:B Labels:cluster=core/grafana-agent, container=activation-server, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s Value:0xc03c3bac90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631407261s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=activation-server, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=activation-server, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s} value=0 ]} {Instance:cluster=core/grafana-agent, container=android-sdk, namespace=pipelines-android, pod=mapa-pipeline-pr-299-4twjq-fprtx-dzxjt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=android-sdk, namespace=pipelines-android, pod=mapa-pipeline-pr-299-4twjq-fprtx-dzxjt Value:0xc03c3bad00} B:{Var:B Labels:cluster=core/grafana-agent, container=android-sdk, namespace=pipelines-android, pod=mapa-pipeline-pr-299-4twjq-fprtx-dzxjt Value:0xc03c3bad50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63141498s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=android-sdk, namespace=pipelines-android, pod=mapa-pipeline-pr-299-4twjq-fprtx-dzxjt} value=0.001582278481012658 ], [ var='B' labels={cluster=core/grafana-agent, container=android-sdk, namespace=pipelines-android, pod=mapa-pipeline-pr-299-4twjq-fprtx-dzxjt} value=0 ]} {Instance:cluster=core/grafana-agent, container=apk-store, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=apk-store, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6 Value:0xc03c3badf0} B:{Var:B Labels:cluster=core/grafana-agent, container=apk-store, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6 Value:0xc03c3bae40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63142224s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=apk-store, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=apk-store, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6} value=0 ]} {Instance:cluster=core/grafana-agent, container=apk-store, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=apk-store, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9 Value:0xc03c3baee0} B:{Var:B Labels:cluster=core/grafana-agent, container=apk-store, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9 Value:0xc03c3baf40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631429348s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=apk-store, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=apk-store, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9} value=0 ]} {Instance:cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf Value:0xc03c3bafc0} B:{Var:B Labels:cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf Value:0xc03c3bb010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631438803s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf} value=0 ]} {Instance:cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx Value:0xc03c3bb090} B:{Var:B Labels:cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx Value:0xc03c3bb0d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631446247s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=apk-store-oauth2-proxy, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx} value=0 ]} {Instance:cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc Value:0xc03c3bb158} B:{Var:B Labels:cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc Value:0xc03c3bb198}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631452493s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc} value=0.0018796992481203009 ], [ var='B' labels={cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc} value=0 ]} {Instance:cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v Value:0xc03c3bb220} B:{Var:B Labels:cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v Value:0xc03c3bb260}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631459005s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=authzgateway-server, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v} value=0 ]} {Instance:cluster=core/grafana-agent, container=backup, namespace=jenkins, pod=jenkins-kropyva State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=backup, namespace=jenkins, pod=jenkins-kropyva Value:0xc03c3bb300} B:{Var:B Labels:cluster=core/grafana-agent, container=backup, namespace=jenkins, pod=jenkins-kropyva Value:0xc03c3bb338}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631463505s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=backup, namespace=jenkins, pod=jenkins-kropyva} value=0.34223918575063617 ], [ var='B' labels={cluster=core/grafana-agent, container=backup, namespace=jenkins, pod=jenkins-kropyva} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-0 Value:0xc03c3bb400} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-0 Value:0xc03c3bb460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631471004s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-1 Value:0xc03c3bb580} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-1 Value:0xc03c3bb518}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631476538s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-2 Value:0xc03c3bb630} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-2 Value:0xc03c3bb698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631483133s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk Value:0xc03c3bb740} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk Value:0xc03c3bb790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63148725s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-0 Value:0xc03c3bb850} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-0 Value:0xc03c3bb8d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631490947s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-1 Value:0xc03c3bb9f8} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-1 Value:0xc03c3bba58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63149517s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-2 Value:0xc03c3bbb10} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-2 Value:0xc03c3bbb68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631498846s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j Value:0xc03c3bbcc0} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j Value:0xc03c3bbd10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63150246s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-0 Value:0xc03c3bbe18} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-0 Value:0xc03c3bbdc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631505597s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-1 Value:0xc03c3bbf30} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-1 Value:0xc03c3bbed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631511263s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-2 Value:0xc065b28128} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-2 Value:0xc065b28068}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631514749s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs Value:0xc065b281d0} B:{Var:B Labels:cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs Value:0xc065b28220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631523729s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=bank-vaults, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs} value=0 ]} {Instance:cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws Value:0xc065b282b0} B:{Var:B Labels:cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws Value:0xc065b28300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631527765s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws} value=0 ]} {Instance:cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm Value:0xc065b28378} B:{Var:B Labels:cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm Value:0xc065b283d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631531895s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=celery, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm} value=0 ]} {Instance:cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-cdxb2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-cdxb2 Value:0xc065b28460} B:{Var:B Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-cdxb2 Value:0xc065b284b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631535268s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-cdxb2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-cdxb2} value=0 ]} {Instance:cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-dfxw4 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-dfxw4 Value:0xc065b28548} B:{Var:B Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-dfxw4 Value:0xc065b28590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631538832s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-dfxw4} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=ops-ingress-nginx-controller-64df748b88-dfxw4} value=0 ]} {Instance:cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-7wqgh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-7wqgh Value:0xc065b28668} B:{Var:B Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-7wqgh Value:0xc065b28628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631542283s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-7wqgh} value=0.0013525698827772767 ], [ var='B' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-7wqgh} value=0 ]} {Instance:cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-xhvsh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-xhvsh Value:0xc065b286f0} B:{Var:B Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-xhvsh Value:0xc065b28740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631546238s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-xhvsh} value=0.0014031805425631433 ], [ var='B' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=prod-ingress-nginx-controller-5945dbfdd6-xhvsh} value=0 ]} {Instance:cluster=core/grafana-agent, container=controller, namespace=core, pod=stage-ingress-nginx-controller-b9fd8cf7b-6vlmx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=stage-ingress-nginx-controller-b9fd8cf7b-6vlmx Value:0xc065b287d8} B:{Var:B Labels:cluster=core/grafana-agent, container=controller, namespace=core, pod=stage-ingress-nginx-controller-b9fd8cf7b-6vlmx Value:0xc065b28960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631550467s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=stage-ingress-nginx-controller-b9fd8cf7b-6vlmx} value=0.0018726591760299626 ], [ var='B' labels={cluster=core/grafana-agent, container=controller, namespace=core, pod=stage-ingress-nginx-controller-b9fd8cf7b-6vlmx} value=0 ]} {Instance:cluster=core/grafana-agent, container=dependency-track-apiserver, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=dependency-track-apiserver, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx Value:0xc065b289e0} B:{Var:B Labels:cluster=core/grafana-agent, container=dependency-track-apiserver, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx Value:0xc065b28b28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631554559s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=dependency-track-apiserver, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=dependency-track-apiserver, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx} value=0 ]} {Instance:cluster=core/grafana-agent, container=dependency-track-frontend, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=dependency-track-frontend, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5 Value:0xc065b28d50} B:{Var:B Labels:cluster=core/grafana-agent, container=dependency-track-frontend, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5 Value:0xc065b28c78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631561764s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=dependency-track-frontend, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=dependency-track-frontend, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5} value=0 ]} {Instance:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-6bf5f8654d-p52mm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-6bf5f8654d-p52mm Value:0xc065b28de8} B:{Var:B Labels:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-6bf5f8654d-p52mm Value:0xc065b28ee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631569115s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-6bf5f8654d-p52mm} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-6bf5f8654d-p52mm} value=0 ]} {Instance:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-5bhq9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-5bhq9 Value:0xc065b28f70} B:{Var:B Labels:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-5bhq9 Value:0xc065b28fc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63157292s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-5bhq9} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-5bhq9} value=0 ]} {Instance:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-jkxj6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-jkxj6 Value:0xc065b29058} B:{Var:B Labels:cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-jkxj6 Value:0xc065b290a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631576993s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-jkxj6} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=descheduler, namespace=core, pod=descheduler-75c69f57bf-jkxj6} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-42vbm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-42vbm Value:0xc065b29128} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-42vbm Value:0xc065b29178}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631580498s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-42vbm} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-42vbm} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4hgdc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4hgdc Value:0xc065b29248} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4hgdc Value:0xc065b291e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631584409s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4hgdc} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4hgdc} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4w8p9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4w8p9 Value:0xc065b292d8} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4w8p9 Value:0xc065b29318}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631587986s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4w8p9} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-4w8p9} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-9wm4f State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-9wm4f Value:0xc065b293e8} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-9wm4f Value:0xc065b29398}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63159146s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-9wm4f} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-9wm4f} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco,
namespace=falco, pod=falco-d77cm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-d77cm Value:0xc065b29478} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-d77cm Value:0xc065b294a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63159443s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-d77cm} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-d77cm} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-dnm95 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-dnm95 Value:0xc065b29598} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-dnm95 Value:0xc065b29538}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63159928s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-dnm95} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-dnm95} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-mxwsg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-mxwsg Value:0xc065b29708} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-mxwsg Value:0xc065b29748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631604008s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-mxwsg} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-mxwsg} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-n4pfb State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-n4pfb Value:0xc065b297d8} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-n4pfb Value:0xc065b29918}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631607762s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-n4pfb} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-n4pfb} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-spq7v State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-spq7v Value:0xc065b29a68} B:{Var:B Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-spq7v Value:0xc065b299a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631611197s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-spq7v} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-spq7v} value=0 ]} {Instance:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-zxxkx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-zxxkx Value:0xc065b29b38} B:{Var:B 
Labels:cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-zxxkx Value:0xc065b29ae8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631619131s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-zxxkx} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=falco, namespace=falco, pod=falco-zxxkx} value=0 ]} {Instance:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7 Value:0xc065b29be8} B:{Var:B Labels:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7 Value:0xc065b29c50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631622916s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7} value=0.001457062211981567 ], [ var='B' labels={cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7} value=0 ]} {Instance:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l Value:0xc065b29d40} B:{Var:B Labels:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l Value:0xc065b29cf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63162781s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l} value=0 ]} {Instance:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6 Value:0xc065b29df0} B:{Var:B Labels:cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6 Value:0xc065b29e40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631631324s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=fdroid-web, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-8kz75 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-8kz75 Value:0xc065b29f40} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-8kz75 Value:0xc065b29ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631635058s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-8kz75} value=0 ], [ var='B' labels={cluster=core/grafana-agent, 
container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-8kz75} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-wfs7b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-wfs7b Value:0xc067512010} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-wfs7b Value:0xc067512070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631638412s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-wfs7b} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-prod, pod=hemp-admin-6ccb767786-wfs7b} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-stage, pod=hemp-admin-fc7ff9bf5-mtdjn State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-stage, pod=hemp-admin-fc7ff9bf5-mtdjn Value:0xc067512130} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-stage, pod=hemp-admin-fc7ff9bf5-mtdjn Value:0xc067512180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63164318s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-stage, pod=hemp-admin-fc7ff9bf5-mtdjn} value=0.03207547169811321 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-admin, namespace=nettle-stage, pod=hemp-admin-fc7ff9bf5-mtdjn} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-sd8lt State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-sd8lt Value:0xc067512228} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-sd8lt Value:0xc067512360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631648618s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-sd8lt} value=0.0030895983522142125 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-sd8lt} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-zr99b State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-zr99b Value:0xc067512408} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-zr99b Value:0xc067512450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631652106s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-zr99b} value=0.011893870082342177 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-prod, pod=hemp-cabinet-6b8974bcc5-zr99b} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-stage, pod=hemp-cabinet-5447694458-lmg8m State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-stage, pod=hemp-cabinet-5447694458-lmg8m Value:0xc067512500} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-stage, pod=hemp-cabinet-5447694458-lmg8m Value:0xc067512560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631655998s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-stage, pod=hemp-cabinet-5447694458-lmg8m} value=0.002472187886279357 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-cabinet, namespace=nettle-stage, pod=hemp-cabinet-5447694458-lmg8m} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql Value:0xc067512630} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql Value:0xc0675125f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631661142s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 Value:0xc0675126a0} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 Value:0xc0675126f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631664512s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-cabinet-logs, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-prod, pod=hemp-filemanagement-7d87bbb9f7-bgfcc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-prod, pod=hemp-filemanagement-7d87bbb9f7-bgfcc Value:0xc067512780} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-prod, pod=hemp-filemanagement-7d87bbb9f7-bgfcc Value:0xc0675127c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631668367s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-prod, pod=hemp-filemanagement-7d87bbb9f7-bgfcc} value=0.009562841530054645 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-prod, pod=hemp-filemanagement-7d87bbb9f7-bgfcc} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-stage, pod=hemp-filemanagement-84f66d9fc5-zj66g State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-stage, pod=hemp-filemanagement-84f66d9fc5-zj66g Value:0xc067512850} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-stage, pod=hemp-filemanagement-84f66d9fc5-zj66g Value:0xc067512890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63167227s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-stage, pod=hemp-filemanagement-84f66d9fc5-zj66g} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-filemanagement, namespace=nettle-stage, pod=hemp-filemanagement-84f66d9fc5-zj66g} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj Value:0xc067512920} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj Value:0xc067512a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631675841s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj} value=0 ]} {Instance:cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw Value:0xc067512ac8} B:{Var:B Labels:cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw Value:0xc067512b10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631679651s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=hemp-ui, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw} value=0 ]} {Instance:cluster=core/grafana-agent, container=jenkins-master, namespace=jenkins, pod=jenkins-kropyva State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=jenkins-master, namespace=jenkins, pod=jenkins-kropyva Value:0xc067512bb0} B:{Var:B Labels:cluster=core/grafana-agent, container=jenkins-master, namespace=jenkins, pod=jenkins-kropyva Value:0xc067512c20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631683402s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=jenkins-master, namespace=jenkins, pod=jenkins-kropyva} value=0.002066115702479339 ], [ var='B' labels={cluster=core/grafana-agent, container=jenkins-master, namespace=jenkins, pod=jenkins-kropyva} value=0 ]} {Instance:cluster=core/grafana-agent, container=keycloak, namespace=nettle-prod, pod=keycloak-keycloakx-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=keycloak, namespace=nettle-prod, pod=keycloak-keycloakx-0 Value:0xc067512d28} B:{Var:B Labels:cluster=core/grafana-agent, container=keycloak, namespace=nettle-prod, pod=keycloak-keycloakx-0 Value:0xc067512cd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631687532s EvaluationString:[ var='A' 
labels={cluster=core/grafana-agent, container=keycloak, namespace=nettle-prod, pod=keycloak-keycloakx-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=keycloak, namespace=nettle-prod, pod=keycloak-keycloakx-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=keycloak, namespace=nettle-stage, pod=keycloak-keycloakx-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=keycloak, namespace=nettle-stage, pod=keycloak-keycloakx-0 Value:0xc067512e78} B:{Var:B Labels:cluster=core/grafana-agent, container=keycloak, namespace=nettle-stage, pod=keycloak-keycloakx-0 Value:0xc067512ee8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.6316912s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=keycloak, namespace=nettle-stage, pod=keycloak-keycloakx-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=keycloak, namespace=nettle-stage, pod=keycloak-keycloakx-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=meteo-server, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=meteo-server, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5 Value:0xc067512f90} B:{Var:B Labels:cluster=core/grafana-agent, container=meteo-server, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5 Value:0xc067512fe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631694606s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=meteo-server, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5} value=0.08098159509202454 ], [ var='B' labels={cluster=core/grafana-agent, container=meteo-server, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5} value=0 ]} {Instance:cluster=core/grafana-agent, container=meteo-server, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=meteo-server, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5 Value:0xc067513090} B:{Var:B Labels:cluster=core/grafana-agent, container=meteo-server, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5 Value:0xc0675130e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631699287s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=meteo-server, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=meteo-server, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5} value=0 ]} {Instance:cluster=core/grafana-agent, container=nginx, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=nginx, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd Value:0xc067513170} B:{Var:B Labels:cluster=core/grafana-agent, container=nginx, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd Value:0xc0675131b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63170261s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=nginx, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=nginx, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, 
namespace=vault-ops, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-0 Value:0xc067513258} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-0 Value:0xc0675132a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631707361s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-1 Value:0xc067513328} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-1 Value:0xc067513378}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631710839s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-2 Value:0xc067513410} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-2 Value:0xc067513470}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631714951s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-ops, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-0 Value:0xc067513500} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-0 Value:0xc067513548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631719626s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-1 Value:0xc0675135d8} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-1 Value:0xc067513628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631724693s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-1} value=0 ], [ var='B' 
labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-2 Value:0xc0675137e8} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-2 Value:0xc0675137a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631729288s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-prod, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-0 Value:0xc067513870} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-0 Value:0xc0675138e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631733083s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-1 Value:0xc067513978} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-1 Value:0xc0675139b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631736627s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-2 Value:0xc067513a50} B:{Var:B Labels:cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-2 Value:0xc067513aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63173983s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=prometheus-exporter, namespace=vault-stage, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-6wbvr State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-6wbvr Value:0xc067513b40} B:{Var:B Labels:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-6wbvr Value:0xc067513b98}] EvaluatedAt:2024-05-29 13:44:10 +0000 
UTC EvaluationDuration:3.631743649s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-6wbvr} value=0.006024096385542168 ], [ var='B' labels={cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-6wbvr} value=0 ]} {Instance:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-9d986 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-9d986 Value:0xc067513de0} B:{Var:B Labels:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-9d986 Value:0xc067513e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63174695s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-9d986} value=0.0026041666666666665 ], [ var='B' labels={cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-9d986} value=0 ]} {Instance:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-tr28z State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-tr28z Value:0xc07d5ee0f0} B:{Var:B Labels:cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-tr28z Value:0xc067513ea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631750722s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-tr28z} value=0.01639344262295082 ], [ var='B' labels={cluster=core/grafana-agent, container=reflector, namespace=core, pod=reflector-ccd7686f7-tr28z} value=0 ]} {Instance:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh Value:0xc07d5ee160} B:{Var:B Labels:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh Value:0xc07d5ee1a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631755099s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh} value=0.16666666666666666 ], [ var='B' labels={cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh} value=0 ]} {Instance:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-28wp6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-28wp6 Value:0xc07d5ee268} B:{Var:B Labels:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-28wp6 Value:0xc07d5ee2a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631758367s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-28wp6} value=0.2222222222222222 ], [ var='B' labels={cluster=core/grafana-agent, container=reloader-reloader, namespace=core, 
pod=reloader-reloader-66f757db6-28wp6} value=0 ]} {Instance:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-cgcmf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-cgcmf Value:0xc07d5ee318} B:{Var:B Labels:cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-cgcmf Value:0xc07d5ee3e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631762095s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-cgcmf} value=0.15816326530612246 ], [ var='B' labels={cluster=core/grafana-agent, container=reloader-reloader, namespace=core, pod=reloader-reloader-66f757db6-cgcmf} value=0 ]} {Instance:cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j Value:0xc07d5ee5c0} B:{Var:B Labels:cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j Value:0xc07d5ee690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631765906s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j} value=0 ]} {Instance:cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h Value:0xc07d5ee730} B:{Var:B Labels:cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h Value:0xc07d5ee8d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631769509s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=repo-server, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h} value=0 ]} {Instance:cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql Value:0xc07d5ee978} B:{Var:B Labels:cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql Value:0xc07d5eea20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631774535s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql} value=0 ]} {Instance:cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-stage, 
pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 Value:0xc07d5eeab8} B:{Var:B Labels:cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 Value:0xc07d5eeb88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631790485s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=s3-proxy, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5} value=0 ]} {Instance:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg Value:0xc07d5eed90} B:{Var:B Labels:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg Value:0xc07d5eedf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63179393s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg} value=0 ]} {Instance:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f Value:0xc07d5eef10} B:{Var:B Labels:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f Value:0xc07d5eef60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631797303s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f} value=0 ]} {Instance:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb Value:0xc07d5ef090} B:{Var:B Labels:cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb Value:0xc07d5ef0f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631805352s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=secrets-to-vault, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb} value=0 ]} {Instance:cluster=core/grafana-agent, container=sonarqube, namespace=sonarqube, pod=sonarqube-sonarqube-c94ccdb69-mg8g6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=sonarqube, 
namespace=sonarqube, pod=sonarqube-sonarqube-c94ccdb69-mg8g6 Value:0xc07d5ef2c8} B:{Var:B Labels:cluster=core/grafana-agent, container=sonarqube, namespace=sonarqube, pod=sonarqube-sonarqube-c94ccdb69-mg8g6 Value:0xc07d5ef320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631808988s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=sonarqube, namespace=sonarqube, pod=sonarqube-sonarqube-c94ccdb69-mg8g6} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=sonarqube, namespace=sonarqube, pod=sonarqube-sonarqube-c94ccdb69-mg8g6} value=0 ]} {Instance:cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-75b45c4666-lfvdb State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-75b45c4666-lfvdb Value:0xc07d5ef430} B:{Var:B Labels:cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-75b45c4666-lfvdb Value:0xc07d5ef4f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631814604s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-75b45c4666-lfvdb} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-75b45c4666-lfvdb} value=0 ]} {Instance:cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-7db7d86bdf-5qx6p State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-7db7d86bdf-5qx6p Value:0xc07d5ef598} B:{Var:B Labels:cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-7db7d86bdf-5qx6p Value:0xc07d5ef650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63181841s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-7db7d86bdf-5qx6p} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=trivy-operator, namespace=trivy-system, pod=trivy-operator-7db7d86bdf-5qx6p} value=0 ]} {Instance:cluster=core/grafana-agent, container=uwsgi, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=uwsgi, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd Value:0xc07d5ef880} B:{Var:B Labels:cluster=core/grafana-agent, container=uwsgi, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd Value:0xc07d5ef6e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631822298s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=uwsgi, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=uwsgi, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-0 Value:0xc07d5ef990} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-0 Value:0xc07d5ef938}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631825442s 
EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-1 Value:0xc07d5efaa0} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-1 Value:0xc07d5efb00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631828738s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-2 Value:0xc07d5efc18} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-2 Value:0xc07d5efc68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63183224s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-ops, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-0 Value:0xc07d5efd80} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-0 Value:0xc07d5efdd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631847989s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-1 Value:0xc07d5efef0} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-1 Value:0xc07d5eff48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631853711s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-2 Value:0xc03aa5a058} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-2 Value:0xc03aa5a010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631859207s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-prod, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, 
namespace=vault-prod, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-0 Value:0xc03aa5a0d8} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-0 Value:0xc03aa5a120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631864345s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-1 Value:0xc03aa5a1a8} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-1 Value:0xc03aa5a1e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631869365s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-2 Value:0xc03aa5a278} B:{Var:B Labels:cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-2 Value:0xc03aa5a2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631875839s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, container=vault, namespace=vault-stage, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs Value:0xc03aa5a348} B:{Var:B Labels:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs Value:0xc03aa5a390}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631880852s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs} value=0.08006535947712419 ], [ var='B' labels={cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk Value:0xc03aa5a410} B:{Var:B Labels:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk Value:0xc03aa5a450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631884995s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault-operator, namespace=vault, 
pod=vault-operator-7cf5f6c784-8x2gk} value=0.05753968253968254 ], [ var='B' labels={cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk} value=0 ]} {Instance:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2 Value:0xc03aa5a520} B:{Var:B Labels:cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2 Value:0xc03aa5a4d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631889879s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2} value=0.06212424849699399 ], [ var='B' labels={cluster=core/grafana-agent, container=vault-operator, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=descheduler-6bf5f8654d-p52mm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=descheduler-6bf5f8654d-p52mm Value:0xc03aa5a658} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=descheduler-6bf5f8654d-p52mm Value:0xc03aa5a680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631893959s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=descheduler-6bf5f8654d-p52mm} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=descheduler-6bf5f8654d-p52mm} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-5bhq9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-5bhq9 Value:0xc03aa5a770} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-5bhq9 Value:0xc03aa5a748}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631898345s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-5bhq9} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-5bhq9} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-jkxj6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-jkxj6 Value:0xc03aa5a858} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-jkxj6 Value:0xc03aa5a838}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631902167s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-jkxj6} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=descheduler-75c69f57bf-jkxj6} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-6wbvr State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-6wbvr Value:0xc03aa5a920} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-6wbvr Value:0xc03aa5a948}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631905717s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, 
pod=reflector-ccd7686f7-6wbvr} value=0.006097560975609756 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-6wbvr} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-9d986 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-9d986 Value:0xc03aa5a988} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-9d986 Value:0xc03aa5a9f8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63191047s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-9d986} value=0.002638522427440633 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-9d986} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-tr28z State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-tr28z Value:0xc03aa5aa38} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-tr28z Value:0xc03aa5aa58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631914352s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-tr28z} value=0.03252032520325203 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=reflector-ccd7686f7-tr28z} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh Value:0xc03aa5aac8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh Value:0xc03aa5aaa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631918125s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh} value=0.15625000000000003 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-57bff8dbf9-qtwdh} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-28wp6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-28wp6 Value:0xc03aa5ab10} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-28wp6 Value:0xc03aa5ab38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63192229s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-28wp6} value=0.22608695652173916 ], [ var='B' labels={cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-28wp6} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-cgcmf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-cgcmf Value:0xc03aa5ab78} B:{Var:B Labels:cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-cgcmf Value:0xc03aa5ab98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631927432s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-cgcmf} value=0.15270935960591134 ], [ var='B' 
labels={cluster=core/grafana-agent, namespace=core, pod=reloader-reloader-66f757db6-cgcmf} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws Value:0xc03aa5abf8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws Value:0xc03aa5ac28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631930986s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-beat-7c4787c5b6-s7tws} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm Value:0xc03aa5ac80} B:{Var:B Labels:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm Value:0xc03aa5acb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631934528s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-celery-worker-8588d96b8b-qmnnm} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd Value:0xc03aa5aec0} B:{Var:B Labels:cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd Value:0xc03aa5aef8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631937617s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=defectdojo, pod=defectdojo-django-6cfb7c75c9-q2xrd} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx Value:0xc03aa5af90} B:{Var:B Labels:cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx Value:0xc03aa5af60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63194113s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-apiserver-7f9c76b7f4-gv9vx} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5 Value:0xc03aa5b010} B:{Var:B 
Labels:cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5 Value:0xc03aa5b040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631945107s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=dependencytrack, pod=dependencytrack-dependency-track-frontend-86544c6b8f-z77b5} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j Value:0xc03aa5b090} B:{Var:B Labels:cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j Value:0xc03aa5b0b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631949234s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-xtz6j} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h Value:0xc03aa5b128} B:{Var:B Labels:cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h Value:0xc03aa5b100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631953006s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=gitops, pod=argo-cd-argocd-repo-server-6b9cb56fcc-zgd9h} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=jenkins, pod=jenkins-kropyva State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=jenkins, pod=jenkins-kropyva Value:0xc03aa5b190} B:{Var:B Labels:cluster=core/grafana-agent, namespace=jenkins, pod=jenkins-kropyva Value:0xc03aa5b1b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631972792s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=jenkins, pod=jenkins-kropyva} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=jenkins, pod=jenkins-kropyva} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td Value:0xc03aa5b218} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td Value:0xc03aa5b248}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63197891s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td} value=0.030495552731893263 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=activation-server-547667fd6b-ql6td} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6 State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6 Value:0xc03aa5b3a8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6 Value:0xc03aa5b2a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631985392s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-7cb9599866-s5jm6} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf Value:0xc03aa5b400} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf Value:0xc03aa5b430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.631991482s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=apk-store-oauth2-proxy-77d6d6dd9d-lfjdf} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc Value:0xc03aa5b498} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc Value:0xc03aa5b4e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63199813s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc} value=0.0018050541516245488 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=authzgateway-server-cb4b87d9d-n8xgc} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7 Value:0xc03aa5b540} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7 Value:0xc03aa5b578}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632004636s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7} value=0.0014008655913978494 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-qcbt7} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l Value:0xc03aa5b5c8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l Value:0xc03aa5b5f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632010285s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=fdroid-web-7d99c5844-tzf6l} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql Value:0xc03aa5b730} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql Value:0xc03aa5b760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63201546s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-cabinet-logs-7c9867b8fb-rfxql} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj Value:0xc03aa5b7b8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj Value:0xc03aa5b7e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632020057s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=hemp-ui-5f8fdc696c-xbqpj} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5 Value:0xc03aa5b840} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5 Value:0xc03aa5b870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632023692s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5} value=0.07390396659707725 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-prod, pod=meteo-server-68c7dc76dc-rflf5} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s Value:0xc03aa5b8e0} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s Value:0xc03aa5b910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632028072s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=activation-server-86c4bc5457-s5j9s} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9 Value:0xc03aa5b968} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9 Value:0xc03aa5b990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632031717s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-7bdb5fd5c8-7v8x9} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, 
pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx Value:0xc03aa5b9f0} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx Value:0xc03aa5ba20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632036168s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=apk-store-oauth2-proxy-78fbc8c697-kgvpx} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v Value:0xc03aa5ba80} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v Value:0xc03aa5bab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632039217s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=authzgateway-server-56cd6d8b49-62h6v} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6 Value:0xc03aa5bb60} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6 Value:0xc03aa5bb90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632043643s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=fdroid-web-64cf66bb89-2nfx6} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 Value:0xc03aa5bc78} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5 Value:0xc03aa5bcb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632048952s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-cabinet-logs-67fcd6c9f5-t8sx5} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw Value:0xc03aa5bd10} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw Value:0xc03aa5bd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632054385s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=hemp-ui-5dd6db4f57-h8qlw} value=0 
]} {Instance:cluster=core/grafana-agent, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5 Value:0xc03aa5bda0} B:{Var:B Labels:cluster=core/grafana-agent, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5 Value:0xc03aa5bdd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63206128s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=nettle-stage, pod=meteo-server-56c9b7c546-kdbr5} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs Value:0xc03aa5be18} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs Value:0xc03aa5be38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632069417s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs} value=0.08419243986254296 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-5zmvs} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk Value:0xc03aa5be80} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk Value:0xc03aa5bea8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632078628s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk} value=0.0536779324055666 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-8x2gk} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2 Value:0xc03aa5bee8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2 Value:0xc03aa5bf10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63208549s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2} value=0.06498951781970651 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault, pod=vault-operator-7cf5f6c784-xkqr2} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg Value:0xc03aa5bf70} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg Value:0xc03aa5bfa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632091607s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=secrets-to-vault-84d97769c7-gjlmg} value=0 ]} 
{Instance:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-0 Value:0xc0a6044010} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-0 Value:0xc0a6044060}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632097603s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-1 Value:0xc0a6044268} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-1 Value:0xc0a6044298}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632101875s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-2 Value:0xc0a6044308} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-2 Value:0xc0a6044338}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632105417s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk Value:0xc0a6044390} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk Value:0xc0a60443c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632108573s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-ops, pod=vault-configurer-754ccc9cb7-q2bxk} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f Value:0xc0a6044430} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f Value:0xc0a6044460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632113934s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=secrets-to-vault-56b896b8dc-v4n7f} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-0 Value:0xc0a6044508} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-0 Value:0xc0a60444c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.632117652s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-1 Value:0xc0a6044578} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-1 Value:0xc0a60445a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632121337s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-2 Value:0xc0a6044648} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-2 Value:0xc0a6044618}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632125023s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j Value:0xc0a60446a0} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j Value:0xc0a60446e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632128876s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-prod, pod=vault-configurer-749cd6747c-hl55j} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb Value:0xc0a6044740} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb Value:0xc0a6044788}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63213242s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=secrets-to-vault-7f67d56b-g8gtb} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-0 Value:0xc0a60447f8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-0 Value:0xc0a6044838}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632136715s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-0} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-0} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-1 State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-1 Value:0xc0a60448d8} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-1 Value:0xc0a60448a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.63214105s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-1} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-1} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-2 Value:0xc0a6044988} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-2 Value:0xc0a6044940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.6321459s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-2} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-2} value=0 ]} {Instance:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs Value:0xc0a6044a20} B:{Var:B Labels:cluster=core/grafana-agent, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs Value:0xc0a60449f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632151564s EvaluationString:[ var='A' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs} value=0 ], [ var='B' labels={cluster=core/grafana-agent, namespace=vault-stage, pod=vault-configurer-545d6f4d5-qwrqs} value=0 ]}]" duration=49.870614ms + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:13.641627761Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kp5n968w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.641571708Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.641530613Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.709337ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-031" t=2024-05-29T13:44:13.641503906Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koyy0mu1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.641451577Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-031" t=2024-05-29T13:44:13.641472083Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=373502 slug=stakeandrelax instance= t=2024-05-29T13:44:13.641348693Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koyy0mu1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.641386546Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.641299153Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kotlxnmg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.641338335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-030" t=2024-05-29T13:44:13.641322025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-030" t=2024-05-29T13:44:13.641312158Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.641240797Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.64121512Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kotlxnmg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.641176634Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.641188281Z caller=ruler.go:522 msg="tenant is owned by this instance" user=453263 slug=gmamonitor groups=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koibnvi1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.641145433Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.641190365Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=297051 slug=idealpos + level=debug ts=2024-05-29T13:44:13.641157363Z caller=ruler.go:522 msg="tenant is owned by this instance" user=297051 slug=idealpos groups=0 + level=debug ts=2024-05-29T13:44:13.641071969Z caller=ruler.go:522 msg="tenant is owned by this instance" user=475146 slug=iuriimordovin 
groups=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koibnvi1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.641067683Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=361282 slug=turing instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.640909578Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koddwrw4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.640894371Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=361282 slug=turing t=2024-05-29T13:44:13.640877342Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.640931806Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.640985572Z caller=remote_instance_store.go:51 user=344017 slug=descript msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.640978067Z caller=client.go:80 msg="creating client for grafana instance" user=412155 addr=dns:///lpsyapp-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.64079042Z caller=client.go:80 msg="creating client for grafana instance" user=639081 addr=dns:///limark-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.scheduler user=344017 slug=descript version=3 fingerprint=811f9b8b79fa257a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.64078916Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.project_id=production-273614, resource.type=k8s_container State:Normal Error: Results:map[] Values:map[Reduce:{Var:Reduce Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc0656e8dd0} Threshold:{Var:Threshold Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc0656e8dd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.640436652s EvaluationString:[ var='Reduce' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0.15151549846350162 ], [ var='Threshold' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0 ]}]" duration=490.233423ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-027" t=2024-05-29T13:44:13.640872498Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.64070168Z caller=ruler.go:522 msg="tenant is owned by this instance" user=532577 slug=karimshakirov123 groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-027" 
t=2024-05-29T13:44:13.640860737Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koddwrw4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.64084044Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.640758641Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kocjrxco-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.640711449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kocjrxco-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.640645648Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.640579002Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kobzlg5l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.640503837Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-025" t=2024-05-29T13:44:13.640527244Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.640514081Z caller=client.go:80 msg="creating client for grafana instance" user=504368 addr=dns:///libremfg-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-025" t=2024-05-29T13:44:13.640484336Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-024" t=2024-05-29T13:44:13.640310199Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kob8xdlc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.640063442Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.640117059Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.640171768Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.640103141Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:13.640090672Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=426229 slug=accelbyte version=349 fingerprint=d040f19fa3d4bf12 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.640041324Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.639711979s EvaluationString:}]" duration=253.732277ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kob8xdlc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.63979553Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kob8xdlc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.639729089Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kob8xdlc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.639697839Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-020" t=2024-05-29T13:44:13.639737426Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.639559049Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koa8rcv3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.639160413Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-018" t=2024-05-29T13:44:13.639506166Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.639286864Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-ewr-us-017" t=2024-05-29T13:44:13.639377739Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.639255975Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.639213117Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.93132ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-koa8rcv3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.639100363Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.639069439Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.639017465Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ko9tkrd2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.63883669Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-056" t=2024-05-29T13:44:13.638987244Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-056" t=2024-05-29T13:44:13.638975577Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.63894369Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=163215 slug=tripadvisor instance= t=2024-05-29T13:44:13.638930099Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ko4bs59o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.638346325Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.638874369Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager 
user=163215 slug=tripadvisor t=2024-05-29T13:44:13.638888078Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knyhteh0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.638191813Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knw0rcgj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.637745029Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knw0rcgj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.637414745Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knuu84yr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.636989941Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-054" t=2024-05-29T13:44:13.638728001Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.638677535Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knseatt1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.636739838Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knrv1brh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.636471126Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knrv1brh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.636387365Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.638433082Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.638445119Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:13.638395599Z level=debug msg="Saving alert states" count=9 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-051" t=2024-05-29T13:44:13.638370232Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-ef2ae177e1c74244, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:13.638352207Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knru2vnp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.636307024Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-ef2ae177e1c74244, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:13.638344735Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-ece5a23614a84830, persistentvolumeclaim=data-redpanda-0" t=2024-05-29T13:44:13.638324002Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knbxffeq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.6359201Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knbxffeq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.635835369Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-e669e8fefb304425, persistentvolumeclaim=main-repo1" t=2024-05-29T13:44:13.638260043Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-a8c77b39535646ec, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:13.638238465Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-knbxffeq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.635752928Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kn4lzda0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.635655427Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kn4lzda0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.634531786Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-8f325d7d34394abc, persistentvolumeclaim=data-zookeeper-0" t=2024-05-29T13:44:13.638211549Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-4b28e369a6e94ce1, persistentvolumeclaim=main-main-6w2b-pgdata" t=2024-05-29T13:44:13.638188944Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-32495332d3b440a6, persistentvolumeclaim=data-prometheus-0" t=2024-05-29T13:44:13.638170544Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kmw2euhr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.634062071Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kmw2euhr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.63395766Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kmw2euhr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.633862149Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.638013993Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.637987692Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.257913ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-km2nsqxn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.63306396Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-km2nsqxn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.63301382Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-048" t=2024-05-29T13:44:13.637921421Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-km2nsqxn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.632885009Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-047" t=2024-05-29T13:44:13.637809787Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-klo2iohk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.632547015Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:13.637784621Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=49546 slug=nulogyinfra instance= t=2024-05-29T13:44:13.637770707Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-046" t=2024-05-29T13:44:13.637679364Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-045" t=2024-05-29T13:44:13.637575208Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-042" t=2024-05-29T13:44:13.637160104Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.637092037Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=320906 slug=techcyte t=2024-05-29T13:44:13.637045484Z level=debug msg="Saving alert states" count=9 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2" t=2024-05-29T13:44:13.637030986Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-041" t=2024-05-29T13:44:13.637008046Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43" t=2024-05-29T13:44:13.636980894Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c" t=2024-05-29T13:44:13.636958074Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-data-too-self-service-mod/61a358397cfe5a18" t=2024-05-29T13:44:13.636920762Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2b, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43" t=2024-05-29T13:44:13.636880081Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-copier-tg/85582018f2f39862" t=2024-05-29T13:44:13.636793575Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c" t=2024-05-29T13:44:13.636719684Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=320906 slug=techcyte instance="AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-uploader-cloud-manager/028d7f18dc0918bc" t=2024-05-29T13:44:13.636677316Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-039" t=2024-05-29T13:44:13.636727835Z level=debug msg="Keeping state" state=Normal
+ Error parsing panelUID for alert annotationruleID1403dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=320906 slug=techcyte version=5 fingerprint=df5795bcf1f6c3c0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.636409032Z level=debug msg="Alert rule evaluated" results="[{Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-uploader-cloud-manager/028d7f18dc0918bc State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-uploader-cloud-manager/028d7f18dc0918bc Value:0xc00d460690} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-uploader-cloud-manager/028d7f18dc0918bc Value:0xc00d460698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635697266s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-uploader-cloud-manager/028d7f18dc0918bc} value=1.494234404622932 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-uploader-cloud-manager/028d7f18dc0918bc} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c Value:0xc00d460770} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c Value:0xc00d460778}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635713833s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c} value=2.005241119382374 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2 Value:0xc00d460848} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2 Value:0xc00d460840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635721885s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2} value=0.10983386957845755 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2} value=0 ]} {Instance:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-copier-tg/85582018f2f39862 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-copier-tg/85582018f2f39862 Value:0xc00d460910} C:{Var:C Labels:AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-copier-tg/85582018f2f39862 Value:0xc00d460918}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635726698s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-copier-tg/85582018f2f39862} value=0.003116541495113373 ], [ var='C' labels={AvailabilityZone=us-west-2a, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-copier-tg/85582018f2f39862} value=0 ]} {Instance:AvailabilityZone=us-west-2b, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43 Value:0xc00d460a08} C:{Var:C Labels:AvailabilityZone=us-west-2b, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43 Value:0xc00d460a00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635731649s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43} value=0.8672432566585406 ], [ var='C' labels={AvailabilityZone=us-west-2b, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-data-too-self-service-mod/61a358397cfe5a18 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-data-too-self-service-mod/61a358397cfe5a18 Value:0xc00d460ad8} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-data-too-self-service-mod/61a358397cfe5a18 Value:0xc00d460ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635735856s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-data-too-self-service-mod/61a358397cfe5a18} value=1.6902026735573548 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/tr-tec-data-too-self-service-mod/61a358397cfe5a18} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c Value:0xc00d460bc0} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c Value:0xc00d460bc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635740636s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c} value=2.1439817418452662 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/trai-techcyte-ts/25042bfe75653c6c} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43 Value:0xc00d460cc0} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43 Value:0xc00d460cc8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635748564s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43} value=0.5407757156592286 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-backend-b/46058aeb69c45c43} value=0 ]} {Instance:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2 Value:0xc00d460da0} C:{Var:C Labels:AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2 Value:0xc00d460da8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.635756046s EvaluationString:[ var='B' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2} value=0.07519952053012179 ], [ var='C' labels={AvailabilityZone=us-west-2c, LoadBalancer=app/trai-techcyte-external/36d1a019b6021663, TargetGroup=targetgroup/training-techcyte-cargo-service/1a5201fb58eb9db2} value=0 ]}]" duration=105.450619ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-037" t=2024-05-29T13:44:13.636463494Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.636299001Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:13.636318896Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.331947ms
+ level=debug ts=2024-05-29T13:44:13.636288599Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.635921245Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.635980075Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.635876478Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.128941ms
+ level=debug ts=2024-05-29T13:44:13.635882494Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.63580556Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.63575833Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-032" t=2024-05-29T13:44:13.635787357Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.635587722Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-031" t=2024-05-29T13:44:13.635650086Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.635634166Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.635619231Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.63554027Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-030" t=2024-05-29T13:44:13.635542602Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.635465914Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.635456706Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=750696 slug=karankak21
+ level=info component=discovery ts=2024-05-29T13:44:13.635407038Z caller=client.go:80 msg="creating client for grafana instance" user=656911 addr=dns:///liamgibs-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.635217785Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-028" t=2024-05-29T13:44:13.635250502Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.635088643Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.635154482Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.635043427Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=96.654553ms
+ level=debug ts=2024-05-29T13:44:13.635034247Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.635025904Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=471154 slug=kinrossfarm
+ level=debug ts=2024-05-29T13:44:13.634973727Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.634974631Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.634968743Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.634958223Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=maxi-dfw-us-026" t=2024-05-29T13:44:13.63497524Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.634958376Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.634888183Z caller=ruler.go:522 msg="tenant is owned by this instance" user=433616 slug=kalfeher groups=0
+ level=debug ts=2024-05-29T13:44:13.634889534Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.634829595Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.634750853Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:13.634732443Z caller=remote_alert_sender.go:94 user=229187 slug=feeliters host=feeliters-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.117.124:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b7d56ef1-ffec-460f-89b1-843388cc40a6 alerts=1
+ logger=ngalert.state.manager user=656284 slug=cencosudx t=2024-05-29T13:44:13.634649921Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=info component=discovery ts=2024-05-29T13:44:13.634520693Z caller=client.go:80 msg="creating client for grafana instance" user=323226 addr=dns:///leewestern-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ro-08" t=2024-05-29T13:44:13.634469702Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:13.634407436Z caller=remote_alert_sender.go:94 user=467258 slug=neonprod host=neonprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.146.36.75:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b8172590-d847-450e-9f9f-e9a1673d8195 alerts=1
+ level=info ts=2024-05-29T13:44:13.634359963Z caller=remote_alert_sender.go:94 user=467258 slug=neonprod host=neonprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.143.72:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b8172590-d847-450e-9f9f-e9a1673d8195 alerts=1
+ level=debug ts=2024-05-29T13:44:13.634112087Z caller=remote_instance_store.go:51 user=290313 slug=replit msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ro-06" t=2024-05-29T13:44:13.634176707Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ro-05" t=2024-05-29T13:44:13.634062909Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.634040769Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.633922276Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.633834069Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:13.633848491Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ro-03" t=2024-05-29T13:44:13.633844198Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=538037 slug=drivewealth version=122 fingerprint=0bbec1a5ad2f8a34 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.633762912Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc01237ce80} B:{Var:B Labels: Value:0xc01237ce88} C:{Var:C Labels: Value:0xc01237ce90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.633053974s EvaluationString:[ var='A' labels={} value=37.93677279540916 ], [ var='B' labels={} value=37.93677279540916 ], [ var='C' labels={} value=0 ]}]" duration=24.288849ms
+ logger=ngalert.state.manager user=71697 slug=lovelysystems instance= t=2024-05-29T13:44:13.633729817Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.633800492Z caller=remote_instance_store.go:51 user=71697 slug=lovelysystems msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.633680899Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.63370849Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ro-02" t=2024-05-29T13:44:13.63369579Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ro-02" t=2024-05-29T13:44:13.633681006Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=71697 slug=lovelysystems t=2024-05-29T13:44:13.633652347Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-48" t=2024-05-29T13:44:13.63338959Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=707575 slug=prod1themomproject instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.633230876Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=707575 slug=prod1themomproject instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.633206771Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=707575 slug=prod1themomproject version=4 fingerprint=e82bf394464b5528 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.633128986Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632850038s EvaluationString:}]" duration=31.829829ms
+ level=debug ts=2024-05-29T13:44:13.63283423Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=477346 slug=dariakober instance="datasource_uid=oV7OnVO4k, ref_id=A" t=2024-05-29T13:44:13.633001405Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=477346 slug=dariakober instance="datasource_uid=oV7OnVO4k, ref_id=A" t=2024-05-29T13:44:13.632992529Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=477346 slug=dariakober version=3 fingerprint=96ff8e48cf4cc780 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.632903474Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=oV7OnVO4k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.632577039s EvaluationString:}]" duration=133.326185ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-39" t=2024-05-29T13:44:13.63288461Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-39" t=2024-05-29T13:44:13.632871563Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.632720599Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-38" t=2024-05-29T13:44:13.632708867Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=products, cluster=lmnd-development-eu-west-1, container=kube-state-metrics, deployment=products, endpoint=http, instance=10.12.61.8:8080, job=kube-state-metrics, namespace=master, pod=kube-state-metrics-6c795d5489-x6j8c, region=eu-west-1, service=kube-state-metrics, stage=development" t=2024-05-29T13:44:13.632662953Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-klgbjccr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.632270042Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-35" t=2024-05-29T13:44:13.63220442Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-35" t=2024-05-29T13:44:13.632191515Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-klgbjccr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.63208543Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.632079794Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.631953784Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.631993828Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:13.631970468Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.631905366Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-33" t=2024-05-29T13:44:13.631894738Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.631840219Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.631803755Z caller=remote_instance_store.go:51 user=839521 slug=prodgk msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.631639947Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kl8gpo0n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.631585345Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:13.631574107Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.356797ms
+ Error parsing panelUID for alert annotationruleID1365dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:13.631529354Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=36.441352ms
+ logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:13.631350023Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.181493ms
+ level=debug ts=2024-05-29T13:44:13.631198895Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:13.631159036Z caller=client.go:80 msg="creating client for grafana instance" user=725571 addr=dns:///laikglow-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-27" t=2024-05-29T13:44:13.631144774Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-27" t=2024-05-29T13:44:13.631133798Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kl531n9w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.631037709Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kl531n9w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.630977059Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-itmi-24" t=2024-05-29T13:44:13.630995468Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.630929666Z caller=ruler.go:522 msg="tenant is owned by this instance" user=391105 slug=jamiekieranmartin groups=0
+ level=debug ts=2024-05-29T13:44:13.630738289Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=432323 slug=lithic instance="DBClusterIdentifier=prod-journal-processor-cluster" t=2024-05-29T13:44:13.630762485Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.630731862Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:13.630603892Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=35.763086ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kl1rvmnl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.630534464Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-63" t=2024-05-29T13:44:13.630603355Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.630431852Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kl1rvmnl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.630354902Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-62" t=2024-05-29T13:44:13.630488608Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-56" t=2024-05-29T13:44:13.630220655Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.630203161Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.630170583Z caller=remote_image_capturer.go:61 user=22115 slug=tiki rule_org_id=1 rule_uid=ohr3MTdnz dashboard=000000006 panel=7 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=22115 slug=tiki instance= t=2024-05-29T13:44:13.630195356Z level=warn msg="Failed to take an image" dashboard=000000006 panel=7 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kl0g388f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.63007456Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-54" t=2024-05-29T13:44:13.629980366Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kl0g388f-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.629882028Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.629796707Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.629657011Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkr628z6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.629590915Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkr628z6-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.629522764Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=22115 slug=tiki instance= t=2024-05-29T13:44:13.629515504Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-36" t=2024-05-29T13:44:13.629535185Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.629419474Z level=debug msg="Setting next state" handler=resultError
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-27" t=2024-05-29T13:44:13.629421873Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.629343695Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.629257319Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.629227753Z caller=remote_instance_store.go:51 user=638425 slug=docktech msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-25" t=2024-05-29T13:44:13.629174215Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-25" t=2024-05-29T13:44:13.62916055Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-24" t=2024-05-29T13:44:13.629040084Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=84360 slug=sib t=2024-05-29T13:44:13.628906177Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=45.08411ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-22" t=2024-05-29T13:44:13.628907297Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkodo37y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.628943698Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkodo37y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.628906248Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.628830621Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-20" t=2024-05-29T13:44:13.628677506Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-20" t=2024-05-29T13:44:13.628667455Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.628466942Z caller=ruler.go:522 msg="tenant is owned by this instance" user=361379 slug=guestpro groups=0
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-18" t=2024-05-29T13:44:13.62843619Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkn5be6b-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.628300691Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:13.628332309Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=308587 slug=internationaltouch
+ level=debug ts=2024-05-29T13:44:13.627579469Z caller=ruler.go:522 msg="tenant is owned by this instance" user=480089 slug=jozefsparetan groups=0
+ level=debug ts=2024-05-29T13:44:13.62819621Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.628173563Z caller=grafana.go:247 user=299948 slug=sidekickhealth msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=1 alerts=0
+ level=info ts=2024-05-29T13:44:13.628149394Z caller=grafana.go:247 user=299948 slug=sidekickhealth msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="limit_alerts=16" groups=2 alerts=0
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkhuhnk5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.628096499Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.627919145Z caller=ruler.go:522 msg="tenant is owned by this instance" user=311763 slug=guntoroyk groups=0
+ level=debug ts=2024-05-29T13:44:13.627597523Z caller=ruler.go:522 msg="tenant is owned by this instance" user=349595 slug=jeremyhart groups=0
+ level=info component=discovery ts=2024-05-29T13:44:13.627888866Z caller=client.go:80 msg="creating client for grafana instance" user=365389 addr=dns:///kenntec-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkhuhnk5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.628007948Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:13.627831107Z caller=client.go:80 msg="creating client for grafana instance" user=458781 addr=dns:///katchassets-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:13.62781017Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=524083 slug=jeffreyhktse
+ level=debug ts=2024-05-29T13:44:13.627914654Z caller=remote_instance_store.go:51 user=456850 slug=juniz msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:13.627779801Z caller=client.go:80 msg="creating client for grafana instance" user=532577 addr=dns:///karimshakirov123-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-14" t=2024-05-29T13:44:13.627929043Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.62791805Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-12" t=2024-05-29T13:44:13.627685348Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-11" t=2024-05-29T13:44:13.62756792Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-11" t=2024-05-29T13:44:13.627554159Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:13.627493675Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.095444ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkexmx7r-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.627123839Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-09" t=2024-05-29T13:44:13.627262155Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=178459 slug=usemodernlogic t=2024-05-29T13:44:13.626991331Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=178459 slug=usemodernlogic instance= t=2024-05-29T13:44:13.626966631Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-06" t=2024-05-29T13:44:13.626926574Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkdn1zsd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.626692605Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=679831 slug=joveostageaws t=2024-05-29T13:44:13.626577442Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.626498652Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.626503342Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.626493608Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kkdn1zsd-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.626422952Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.626421208Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.626358655Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kk4tnz19-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.626278911Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-frpa1-02" t=2024-05-29T13:44:13.626251839Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.626092104Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.626034197Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kk4tnz19-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.625906077Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-24" t=2024-05-29T13:44:13.625866366Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kk2ncd5y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.625801486Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.62586709Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kk2ncd5y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.625761065Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.625818626Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-23" t=2024-05-29T13:44:13.625760819Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.625647522Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kk2ncd5y-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.625519363Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:13.62551131Z caller=client.go:80 msg="creating client for grafana instance" user=433616 addr=dns:///kalfeher-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.625284613Z caller=ruler.go:522 msg="tenant is owned by this instance" user=374211 slug=isomer groups=0
+ level=info component=discovery ts=2024-05-29T13:44:13.625372856Z caller=client.go:80 msg="creating client for grafana instance" user=542550 addr=dns:///justinnaismith-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=warn ts=2024-05-29T13:44:13.625282663Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=311967 slug=hmuchaku
+ level=debug ts=2024-05-29T13:44:13.625491912Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-21" t=2024-05-29T13:44:13.625438045Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-21" t=2024-05-29T13:44:13.625423181Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.6252957Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.625353345Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=353678 slug=hofsink
+ level=debug ts=2024-05-29T13:44:13.625325218Z caller=ruler.go:522 msg="tenant is owned by this instance" user=513474 slug=heorobotics groups=0
+ level=info component=discovery ts=2024-05-29T13:44:13.625286106Z caller=client.go:80 msg="creating client for grafana instance" user=563758 addr=dns:///jswa-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.625240988Z caller=ruler.go:522 msg="tenant is owned by this instance" user=311967 slug=hmuchaku groups=0
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-20" t=2024-05-29T13:44:13.625322723Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.625239574Z caller=ruler.go:522 msg="tenant is owned by this instance" user=436007 slug=hyundairotemnif groups=0
+ level=debug ts=2024-05-29T13:44:13.625027159Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-18" t=2024-05-29T13:44:13.625094461Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.62505497Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-17" t=2024-05-29T13:44:13.625000657Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjzi8rsp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.624845606Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjzi8rsp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.624694374Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-15" t=2024-05-29T13:44:13.624930854Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-15" t=2024-05-29T13:44:13.624923526Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.624841922Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-10" t=2024-05-29T13:44:13.624851587Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjzi8rsp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.624553593Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.624651149Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjxk9wot-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.624124279Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:13.624504726Z level=debug msg="Keeping state" state=Normal
+
logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-es-ba-07" t=2024-05-29T13:44:13.624604246Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:13.624511541Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=8a03c309d102b2b6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.624408834Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C0:{Var:C Labels: Value:0xc01818d948} C1:{Var:C Labels: Value:0xc01818d968}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.624095117s EvaluationString:[ var='C0' metric='Value' labels={} value=13 ], [ var='C1' metric='Value' labels={} value=13 ]}]" duration=81.908302ms + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=0a9426a0898ef76b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.624410295Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=9.244354ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-35" t=2024-05-29T13:44:13.624499949Z level=debug msg="Setting next state" handler=resultNormal + level=error ts=2024-05-29T13:44:13.624364545Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + level=debug ts=2024-05-29T13:44:13.624415822Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.624352941Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjxiep5w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.623851416Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjxiep5w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.623743785Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjxiep5w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.623690864Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-33" t=2024-05-29T13:44:13.624311423Z level=debug msg="Keeping state" state=Normal 
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-33" t=2024-05-29T13:44:13.624303225Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.624157592Z caller=grafana.go:247 user=398784 slug=ttec msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=_S7XcbAMzcloud" groups=0 alerts=0 + level=warn ts=2024-05-29T13:44:13.624171927Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=552880 slug=imentu + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjnyrk3v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.623576203Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.624149242Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.624126016Z caller=grafana.go:247 user=398784 slug=ttec msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=_S7XcbAMzcloud" groups=0 alerts=0 + logger=ngalert.state.manager user=806229 slug=simplisafe instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.624080672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.624070171Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.62407564Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.623681644Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-29" t=2024-05-29T13:44:13.623980059Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-28" t=2024-05-29T13:44:13.623880922Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.623179959Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.623703051Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.623737427Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.62330868Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-24" t=2024-05-29T13:44:13.623396143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-24" t=2024-05-29T13:44:13.623379415Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.623225268Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager 
user=260796 slug=expressvpn instance="host=m247-ch-22" t=2024-05-29T13:44:13.623115755Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-21" t=2024-05-29T13:44:13.622976039Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-21" t=2024-05-29T13:44:13.622963214Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.622943884Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-19" t=2024-05-29T13:44:13.622719851Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.622514502Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-17" t=2024-05-29T13:44:13.62246797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=214309 slug=spenmo t=2024-05-29T13:44:13.622453992Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.110984ms + level=debug ts=2024-05-29T13:44:13.622476283Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-14" t=2024-05-29T13:44:13.622051207Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=146728 slug=dgc t=2024-05-29T13:44:13.621988477Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=52.16836ms + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.621953217Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.621904245Z caller=remote_instance_store.go:51 user=384712 slug=nearinc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-13" t=2024-05-29T13:44:13.621938566Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-12" t=2024-05-29T13:44:13.621847584Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-12" t=2024-05-29T13:44:13.621834262Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-ch-11" t=2024-05-29T13:44:13.621695628Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.621691053Z caller=remote_instance_store.go:51 user=224047 slug=ppbtradingtribeprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=314794 slug=bberger instance= t=2024-05-29T13:44:13.621591804Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=314794 slug=bberger instance= t=2024-05-29T13:44:13.621538337Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=224047 slug=ppbtradingtribeprd instance= t=2024-05-29T13:44:13.621629945Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=224047 
slug=ppbtradingtribeprd instance= t=2024-05-29T13:44:13.62162178Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=314794 slug=bberger t=2024-05-29T13:44:13.621497182Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=ddm0pgeh97tvke, ref_id=A" t=2024-05-29T13:44:13.621276043Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=ddm0pgeh97tvke, ref_id=A" t=2024-05-29T13:44:13.621260754Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=ddm0pgeh97tvke, ref_id=A" t=2024-05-29T13:44:13.621231421Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.621225634Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=ddm0pgeh97tvke, ref_id=A" t=2024-05-29T13:44:13.621196141Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=ddm0pgeh97tvke, ref_id=A" t=2024-05-29T13:44:13.621170944Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=ddm0pgeh97tvke, ref_id=A" t=2024-05-29T13:44:13.621165275Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=ddm0pgeh97tvke, ref_id=A" t=2024-05-29T13:44:13.621153885Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.621179171Z caller=grafana.go:247 user=35611 slug=play msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=000000012" groups=0 alerts=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-bts-sk-002" t=2024-05-29T13:44:13.620988743Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=472647 slug=planet t=2024-05-29T13:44:13.620975399Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:13.620882742Z caller=grafana.go:247 user=35611 slug=play msg="rules manager rule groups request" path=/prometheus/api/v1/rules grafana_org_id=1 query="dashboard_uid=000000012" groups=0 alerts=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-bts-sk-001" t=2024-05-29T13:44:13.620876649Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.620845549Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-bg-02" t=2024-05-29T13:44:13.620745782Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=m247-bg-02" t=2024-05-29T13:44:13.620733011Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.620669796Z 
caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=328755 slug=infogrideu instance="DBClusterIdentifier=webapp-db-live, Role=READER" t=2024-05-29T13:44:13.620683001Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.620321844Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.620274094Z caller=remote_alert_sender.go:94 user=698103 slug=vericast host=vericast-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.245.254:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddfnr97wr9ibof alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=kemi-al-02" t=2024-05-29T13:44:13.620285027Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.620245216Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.620228522Z caller=remote_alert_sender.go:94 user=698103 slug=vericast host=vericast-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.104.126:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ddfnr97wr9ibof alerts=1 + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.620177257Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:13.62012313Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.68382ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hosteasy-md-02" t=2024-05-29T13:44:13.620018874Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hosteasy-md-02" t=2024-05-29T13:44:13.620005535Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.619976812Z caller=ruler.go:522 msg="tenant is owned by this instance" user=402722 slug=gowthamsp023 groups=0 + level=info ts=2024-05-29T13:44:13.619904884Z caller=remote_alert_sender.go:94 user=412141 slug=sharethrough host=sharethrough-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.237.34:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fdgyskmp8joqof alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hosteasy-md-01" t=2024-05-29T13:44:13.619877644Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=327842 slug=exabeam t=2024-05-29T13:44:13.619664714Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=pasta-status-storage" t=2024-05-29T13:44:13.619585351Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=327842 slug=exabeam version=329 fingerprint=795f4b97a03937d0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.619603821Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.619320827s EvaluationString:}]" duration=257.60718ms + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=pasta-offset-storage" t=2024-05-29T13:44:13.619553162Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=307381 slug=kambitaskforce instance="topic=pasta-offset-storage" t=2024-05-29T13:44:13.619542769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-4" t=2024-05-29T13:44:13.619531331Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=dp_v2_internal_offeringtool_offering" t=2024-05-29T13:44:13.619406975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=dp_v2_internal_event_v3" t=2024-05-29T13:44:13.619382796Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-25" t=2024-05-29T13:44:13.619253847Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=dp_v2_internal_compacted_retail_shops" t=2024-05-29T13:44:13.61922863Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.historian backend=loki user=459086 slug=metricgamingprd t=2024-05-29T13:44:13.619185657Z level=debug msg="Done saving alert state history batch" + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=dp_v2_internal_bonustool_reward" t=2024-05-29T13:44:13.619199769Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=703825 slug=andrewbauman t=2024-05-29T13:44:13.619194694Z level=debug msg="Saving alert states" count=33 max_state_save_concurrency=1 + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu" t=2024-05-29T13:44:13.619171591Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu" t=2024-05-29T13:44:13.61911089Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=dp_v2_internal_betstorage_transaction" t=2024-05-29T13:44:13.619091885Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=dp_v2_internal_betstorage_coupon_v2" t=2024-05-29T13:44:13.619067944Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="topic=dp_v2_internal_betstorage_coupon_v2" t=2024-05-29T13:44:13.619052327Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu" t=2024-05-29T13:44:13.619040309Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, name=philip.tri-coat.com, type=qemu" t=2024-05-29T13:44:13.618992645Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.618921942Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.618857937Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + 
logger=ngalert.scheduler user=307381 slug=kambitaskforce version=159 fingerprint=dc06744d5f7cb32e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.618661034Z level=debug msg="Alert rule evaluated" results="[{Instance:topic=dp_v2_internal_betoffer State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_betoffer Value:0xc045169c40} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_betoffer Value:0xc045169c50} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_betoffer Value:0xc045169c60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617796761s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_betoffer} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_betoffer} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_betoffer} value=0 ]} {Instance:topic=dp_v2_internal_betstorage_coupon_v2 State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_betstorage_coupon_v2 Value:0xc045169cb0} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_betstorage_coupon_v2 Value:0xc045169c90} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_betstorage_coupon_v2 Value:0xc045169ca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617811117s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_betstorage_coupon_v2} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_betstorage_coupon_v2} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_betstorage_coupon_v2} value=0 ]} {Instance:topic=dp_v2_internal_betstorage_transaction State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_betstorage_transaction Value:0xc045169cd0} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_betstorage_transaction Value:0xc045169ce0} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_betstorage_transaction Value:0xc045169cf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617823435s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_betstorage_transaction} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_betstorage_transaction} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_betstorage_transaction} value=0 ]} {Instance:topic=dp_v2_internal_bonustool_abp State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_bonustool_abp Value:0xc045169d20} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_bonustool_abp Value:0xc045169d30} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_bonustool_abp Value:0xc045169d10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617838706s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_bonustool_abp} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_bonustool_abp} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_bonustool_abp} value=0 ]} {Instance:topic=dp_v2_internal_bonustool_reward State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic 
Labels:topic=dp_v2_internal_bonustool_reward Value:0xc045169d60} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_bonustool_reward Value:0xc045169d70} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_bonustool_reward Value:0xc045169d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617845275s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_bonustool_reward} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_bonustool_reward} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_bonustool_reward} value=0 ]} {Instance:topic=dp_v2_internal_compacted_retail_shops State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_compacted_retail_shops Value:0xc045169db0} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_compacted_retail_shops Value:0xc045169dc0} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_compacted_retail_shops Value:0xc045169dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617852897s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_compacted_retail_shops} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_compacted_retail_shops} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_compacted_retail_shops} value=0 ]} {Instance:topic=dp_v2_internal_compacted_retail_terminals State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_compacted_retail_terminals Value:0xc045169f10} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_compacted_retail_terminals Value:0xc045169f20} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_compacted_retail_terminals Value:0xc045169f30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617861895s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_compacted_retail_terminals} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_compacted_retail_terminals} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_compacted_retail_terminals} value=0 ]} {Instance:topic=dp_v2_internal_event_v3 State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_event_v3 Value:0xc045169f60} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_event_v3 Value:0xc007e16000} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_event_v3 Value:0xc007e16010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617872021s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_event_v3} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_event_v3} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_event_v3} value=0 ]} {Instance:topic=dp_v2_internal_offeringtool_offering State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_offeringtool_offering Value:0xc007e16040} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_offeringtool_offering Value:0xc007e16050} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_offeringtool_offering 
Value:0xc007e16030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617878567s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_offeringtool_offering} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_offeringtool_offering} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_offeringtool_offering} value=0 ]} {Instance:topic=dp_v2_internal_punter_risk_profile State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_punter_risk_profile Value:0xc007e16070} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_punter_risk_profile Value:0xc007e16080} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_punter_risk_profile Value:0xc007e16090}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617885538s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_punter_risk_profile} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_punter_risk_profile} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_punter_risk_profile} value=0 ]} {Instance:topic=dp_v2_internal_retail_wallet_transactions State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=dp_v2_internal_retail_wallet_transactions Value:0xc007e160d0} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=dp_v2_internal_retail_wallet_transactions Value:0xc007e160b0} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=dp_v2_internal_retail_wallet_transactions Value:0xc007e160c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617893528s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=dp_v2_internal_retail_wallet_transactions} value=0 ], [ var='OffsetLagPerTopic' labels={topic=dp_v2_internal_retail_wallet_transactions} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=dp_v2_internal_retail_wallet_transactions} value=0 ]} {Instance:topic=pasta-config-storage State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=pasta-config-storage Value:0xc007e160f0} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=pasta-config-storage Value:0xc007e16100} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=pasta-config-storage Value:0xc007e16110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61790007s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=pasta-config-storage} value=0 ], [ var='OffsetLagPerTopic' labels={topic=pasta-config-storage} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=pasta-config-storage} value=0 ]} {Instance:topic=pasta-offset-storage State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=pasta-offset-storage Value:0xc007e16130} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=pasta-offset-storage Value:0xc007e16140} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=pasta-offset-storage Value:0xc007e16150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617906853s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=pasta-offset-storage} value=0 ], [ var='OffsetLagPerTopic' labels={topic=pasta-offset-storage} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=pasta-offset-storage} value=0 ]} 
{Instance:topic=pasta-status-storage State:Normal Error: Results:map[] Values:map[LastOffsetLagPerTopic:{Var:LastOffsetLagPerTopic Labels:topic=pasta-status-storage Value:0xc007e16180} OffsetLagPerTopic:{Var:OffsetLagPerTopic Labels:topic=pasta-status-storage Value:0xc007e16190} TopicOffsetLagAbove1Milliion:{Var:TopicOffsetLagAbove1Milliion Labels:topic=pasta-status-storage Value:0xc007e16170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617913445s EvaluationString:[ var='LastOffsetLagPerTopic' labels={topic=pasta-status-storage} value=0 ], [ var='OffsetLagPerTopic' labels={topic=pasta-status-storage} value=0 ], [ var='TopicOffsetLagAbove1Milliion' labels={topic=pasta-status-storage} value=0 ]}]" duration=79.533985ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-22" t=2024-05-29T13:44:13.618843374Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.618746727Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu" t=2024-05-29T13:44:13.618771788Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=lQBzRG7Vz, ref_id=A" t=2024-05-29T13:44:13.61875017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu" t=2024-05-29T13:44:13.618724248Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.618716702Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=416983 slug=hawara + level=info component=discovery ts=2024-05-29T13:44:13.618635746Z caller=client.go:80 msg="creating client for grafana instance" user=427403 addr=dns:///jeppo1234-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.618582331Z caller=ruler.go:522 msg="tenant is owned by this instance" user=334836 slug=fakihsoumi groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-20" t=2024-05-29T13:44:13.618607636Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-20" t=2024-05-29T13:44:13.618596601Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu" t=2024-05-29T13:44:13.618571466Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-2" t=2024-05-29T13:44:13.618496031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-19" t=2024-05-29T13:44:13.618361483Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.618375988Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/139, instance=pve19.tri-coat.com:9221, 
job=pve, name=imapsync.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.618366912Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-18" t=2024-05-29T13:44:13.618251169Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc" t=2024-05-29T13:44:13.618129272Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-16" t=2024-05-29T13:44:13.618015641Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.617878059Z caller=client.go:80 msg="creating client for grafana instance" user=461795 addr=dns:///jeddii-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/122, instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.617921304Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc" t=2024-05-29T13:44:13.617863118Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.617781589Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.617724508Z caller=remote_instance_store.go:51 user=191103 slug=amazonadmin msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.617689985Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=191103 slug=amazonadmin t=2024-05-29T13:44:13.617603775Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=191103 slug=amazonadmin version=67 fingerprint=b122b760c055c007 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.617539967Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.617326755s EvaluationString:}]" duration=172.888254ms + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc" t=2024-05-29T13:44:13.61754165Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc" t=2024-05-29T13:44:13.617531862Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.617471061Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.617466786Z caller=remote_instance_store.go:51 user=84360 slug=sib msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=84360 slug=sib instance= 
t=2024-05-29T13:44:13.617402256Z level=debug msg="Setting next state" handler=resultError + level=info ts=2024-05-29T13:44:13.617399764Z caller=remote_alert_sender.go:94 user=815713 slug=returnstaging host=returnstaging-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.186.23:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cde1ra29t7a4gd alerts=1 + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.617394504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc" t=2024-05-29T13:44:13.617300407Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-10" t=2024-05-29T13:44:13.617261949Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/105, instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.617234332Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-1" t=2024-05-29T13:44:13.617124485Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.617011778Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.616986008Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.617024638Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=703825 slug=andrewbauman instance="id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc" t=2024-05-29T13:44:13.61700996Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=839521 slug=prodgk instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.616918481Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.616685456Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=839521 slug=prodgk t=2024-05-29T13:44:13.616903608Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=703825 slug=andrewbauman version=14 fingerprint=46aad6d0ab777bad attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.616152125Z level=debug msg="Alert rule evaluated" results="[{Instance:id=lxc/101, instance=pve19.tri-coat.com:9221, job=pve, name=prometheus.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/101, instance=pve19.tri-coat.com:9221, job=pve, name=prometheus.tri-coat.com, type=lxc Value:0xc061565bf8} B:{Var:B Labels:id=lxc/101, instance=pve19.tri-coat.com:9221, job=pve, name=prometheus.tri-coat.com, type=lxc Value:0xc061565c50} C:{Var:C Labels:id=lxc/101, instance=pve19.tri-coat.com:9221, job=pve, name=prometheus.tri-coat.com, type=lxc Value:0xc061565ca8}] EvaluatedAt:2024-05-29 13:44:10 
+0000 UTC EvaluationDuration:3.61382054s EvaluationString:[ var='A' labels={id=lxc/101, instance=pve19.tri-coat.com:9221, job=pve, name=prometheus.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/101, instance=pve19.tri-coat.com:9221, job=pve, name=prometheus.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/101, instance=pve19.tri-coat.com:9221, job=pve, name=prometheus.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc Value:0xc061565d40} B:{Var:B Labels:id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc Value:0xc061565d98} C:{Var:C Labels:id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc Value:0xc061565df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613848541s EvaluationString:[ var='A' labels={id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/102, instance=pve19.tri-coat.com:9221, job=pve, name=grafana.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/103, instance=pve19.tri-coat.com:9221, job=pve, name=sec2-69.41.195.52.mwpol.net, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/103, instance=pve19.tri-coat.com:9221, job=pve, name=sec2-69.41.195.52.mwpol.net, type=lxc Value:0xc061565e90} B:{Var:B Labels:id=lxc/103, instance=pve19.tri-coat.com:9221, job=pve, name=sec2-69.41.195.52.mwpol.net, type=lxc Value:0xc061565ee8} C:{Var:C Labels:id=lxc/103, instance=pve19.tri-coat.com:9221, job=pve, name=sec2-69.41.195.52.mwpol.net, type=lxc Value:0xc061565f40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613862101s EvaluationString:[ var='A' labels={id=lxc/103, instance=pve19.tri-coat.com:9221, job=pve, name=sec2-69.41.195.52.mwpol.net, type=lxc} value=0 ], [ var='B' labels={id=lxc/103, instance=pve19.tri-coat.com:9221, job=pve, name=sec2-69.41.195.52.mwpol.net, type=lxc} value=0 ], [ var='C' labels={id=lxc/103, instance=pve19.tri-coat.com:9221, job=pve, name=sec2-69.41.195.52.mwpol.net, type=lxc} value=0 ]} {Instance:id=lxc/104, instance=pve19.tri-coat.com:9221, job=pve, name=tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/104, instance=pve19.tri-coat.com:9221, job=pve, name=tri-coat.com, type=lxc Value:0xc008ebc000} B:{Var:B Labels:id=lxc/104, instance=pve19.tri-coat.com:9221, job=pve, name=tri-coat.com, type=lxc Value:0xc008ebc068} C:{Var:C Labels:id=lxc/104, instance=pve19.tri-coat.com:9221, job=pve, name=tri-coat.com, type=lxc Value:0xc008ebc0b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613873141s EvaluationString:[ var='A' labels={id=lxc/104, instance=pve19.tri-coat.com:9221, job=pve, name=tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/104, instance=pve19.tri-coat.com:9221, job=pve, name=tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/104, instance=pve19.tri-coat.com:9221, job=pve, name=tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/105, instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/105, 
instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc Value:0xc008ebc138} B:{Var:B Labels:id=lxc/105, instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc Value:0xc008ebc180} C:{Var:C Labels:id=lxc/105, instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc Value:0xc008ebc1c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613882461s EvaluationString:[ var='A' labels={id=lxc/105, instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/105, instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/105, instance=pve19.tri-coat.com:9221, job=pve, name=bitbucket.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc Value:0xc008ebc270} B:{Var:B Labels:id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc Value:0xc008ebc2a0} C:{Var:C Labels:id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc Value:0xc008ebc2e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613892491s EvaluationString:[ var='A' labels={id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc} value=0 ], [ var='B' labels={id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc} value=0 ], [ var='C' labels={id=lxc/106, instance=pve19.tri-coat.com:9221, job=pve, name=GW-69.41.195.51.mwpol.net, type=lxc} value=0 ]} {Instance:id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc Value:0xc008ebc358} B:{Var:B Labels:id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc Value:0xc008ebc390} C:{Var:C Labels:id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc Value:0xc008ebc3e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613906941s EvaluationString:[ var='A' labels={id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/107, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc Value:0xc008ebc468} B:{Var:B Labels:id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc Value:0xc008ebc498} C:{Var:C Labels:id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc Value:0xc008ebc4e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613947062s EvaluationString:[ var='A' labels={id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc} value=0 ], 
[ var='B' labels={id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/112, instance=pve19.tri-coat.com:9221, job=pve, name=wireguard.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc Value:0xc008ebc570} B:{Var:B Labels:id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc Value:0xc008ebc5b8} C:{Var:C Labels:id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc Value:0xc008ebc608}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613958982s EvaluationString:[ var='A' labels={id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc} value=0 ], [ var='B' labels={id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc} value=0 ], [ var='C' labels={id=lxc/113, instance=pve19.tri-coat.com:9221, job=pve, name=secuity1gw19.mwpol.ca, type=lxc} value=0 ]} {Instance:id=lxc/114, instance=pve19.tri-coat.com:9221, job=pve, name=ansible.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/114, instance=pve19.tri-coat.com:9221, job=pve, name=ansible.tri-coat.com, type=lxc Value:0xc008ebc688} B:{Var:B Labels:id=lxc/114, instance=pve19.tri-coat.com:9221, job=pve, name=ansible.tri-coat.com, type=lxc Value:0xc008ebc6d0} C:{Var:C Labels:id=lxc/114, instance=pve19.tri-coat.com:9221, job=pve, name=ansible.tri-coat.com, type=lxc Value:0xc008ebc718}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613969462s EvaluationString:[ var='A' labels={id=lxc/114, instance=pve19.tri-coat.com:9221, job=pve, name=ansible.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/114, instance=pve19.tri-coat.com:9221, job=pve, name=ansible.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/114, instance=pve19.tri-coat.com:9221, job=pve, name=ansible.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/116, instance=pve19.tri-coat.com:9221, job=pve, name=openhab.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/116, instance=pve19.tri-coat.com:9221, job=pve, name=openhab.tri-coat.com, type=lxc Value:0xc008ebc7a0} B:{Var:B Labels:id=lxc/116, instance=pve19.tri-coat.com:9221, job=pve, name=openhab.tri-coat.com, type=lxc Value:0xc008ebc7e0} C:{Var:C Labels:id=lxc/116, instance=pve19.tri-coat.com:9221, job=pve, name=openhab.tri-coat.com, type=lxc Value:0xc008ebc820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613979662s EvaluationString:[ var='A' labels={id=lxc/116, instance=pve19.tri-coat.com:9221, job=pve, name=openhab.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/116, instance=pve19.tri-coat.com:9221, job=pve, name=openhab.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/116, instance=pve19.tri-coat.com:9221, job=pve, name=openhab.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/118, instance=pve19.tri-coat.com:9221, job=pve, name=smtp.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/118, instance=pve19.tri-coat.com:9221, job=pve, name=smtp.tri-coat.com, type=lxc Value:0xc008ebc8b8} B:{Var:B Labels:id=lxc/118, instance=pve19.tri-coat.com:9221, job=pve, 
name=smtp.tri-coat.com, type=lxc Value:0xc008ebc8f8} C:{Var:C Labels:id=lxc/118, instance=pve19.tri-coat.com:9221, job=pve, name=smtp.tri-coat.com, type=lxc Value:0xc008ebc940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613988642s EvaluationString:[ var='A' labels={id=lxc/118, instance=pve19.tri-coat.com:9221, job=pve, name=smtp.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/118, instance=pve19.tri-coat.com:9221, job=pve, name=smtp.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/118, instance=pve19.tri-coat.com:9221, job=pve, name=smtp.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc Value:0xc008ebc9a8} B:{Var:B Labels:id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc Value:0xc008ebc9e0} C:{Var:C Labels:id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc Value:0xc008ebca38}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613998833s EvaluationString:[ var='A' labels={id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/119, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc Value:0xc008ebcad0} B:{Var:B Labels:id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc Value:0xc008ebcb18} C:{Var:C Labels:id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc Value:0xc008ebcb60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614009643s EvaluationString:[ var='A' labels={id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc} value=0 ], [ var='B' labels={id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc} value=0 ], [ var='C' labels={id=lxc/120, instance=pve19.tri-coat.com:9221, job=pve, name=sec1-69.41.195.53.mwpol.net, type=lxc} value=0 ]} {Instance:id=lxc/122, instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/122, instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc Value:0xc008ebcbf8} B:{Var:B Labels:id=lxc/122, instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc Value:0xc008ebcd18} C:{Var:C Labels:id=lxc/122, instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc Value:0xc008ebcd60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614022213s EvaluationString:[ var='A' labels={id=lxc/122, instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/122, instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/122, 
instance=pve19.tri-coat.com:9221, job=pve, name=punchclock.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/125, instance=pve19.tri-coat.com:9221, job=pve, name=security2gw19.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/125, instance=pve19.tri-coat.com:9221, job=pve, name=security2gw19.tri-coat.com, type=lxc Value:0xc008ebce40} B:{Var:B Labels:id=lxc/125, instance=pve19.tri-coat.com:9221, job=pve, name=security2gw19.tri-coat.com, type=lxc Value:0xc008ebce90} C:{Var:C Labels:id=lxc/125, instance=pve19.tri-coat.com:9221, job=pve, name=security2gw19.tri-coat.com, type=lxc Value:0xc008ebcde8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614033343s EvaluationString:[ var='A' labels={id=lxc/125, instance=pve19.tri-coat.com:9221, job=pve, name=security2gw19.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/125, instance=pve19.tri-coat.com:9221, job=pve, name=security2gw19.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/125, instance=pve19.tri-coat.com:9221, job=pve, name=security2gw19.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/127, instance=pve19.tri-coat.com:9221, job=pve, name=mail.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/127, instance=pve19.tri-coat.com:9221, job=pve, name=mail.tri-coat.com, type=lxc Value:0xc008ebd078} B:{Var:B Labels:id=lxc/127, instance=pve19.tri-coat.com:9221, job=pve, name=mail.tri-coat.com, type=lxc Value:0xc008ebcfd0} C:{Var:C Labels:id=lxc/127, instance=pve19.tri-coat.com:9221, job=pve, name=mail.tri-coat.com, type=lxc Value:0xc008ebd030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614044554s EvaluationString:[ var='A' labels={id=lxc/127, instance=pve19.tri-coat.com:9221, job=pve, name=mail.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/127, instance=pve19.tri-coat.com:9221, job=pve, name=mail.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/127, instance=pve19.tri-coat.com:9221, job=pve, name=mail.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc Value:0xc008ebd130} B:{Var:B Labels:id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc Value:0xc008ebd188} C:{Var:C Labels:id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc Value:0xc008ebd1e8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614055554s EvaluationString:[ var='A' labels={id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc} value=0 ], [ var='B' labels={id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc} value=0 ], [ var='C' labels={id=lxc/130, instance=pve19.tri-coat.com:9221, job=pve, name=beacon.mwpol.net, type=lxc} value=0 ]} {Instance:id=lxc/131, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/131, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc Value:0xc008ebd268} B:{Var:B Labels:id=lxc/131, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc Value:0xc008ebd298} C:{Var:C Labels:id=lxc/131, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc Value:0xc008ebd2e8}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614066664s EvaluationString:[ var='A' labels={id=lxc/131, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/131, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/131, instance=pve19.tri-coat.com:9221, job=pve, name=mysql.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/133, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.mwpol.net, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/133, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.mwpol.net, type=lxc Value:0xc008ebd470} B:{Var:B Labels:id=lxc/133, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.mwpol.net, type=lxc Value:0xc008ebd4b8} C:{Var:C Labels:id=lxc/133, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.mwpol.net, type=lxc Value:0xc008ebd530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614081884s EvaluationString:[ var='A' labels={id=lxc/133, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.mwpol.net, type=lxc} value=0 ], [ var='B' labels={id=lxc/133, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.mwpol.net, type=lxc} value=0 ], [ var='C' labels={id=lxc/133, instance=pve19.tri-coat.com:9221, job=pve, name=proxy.mwpol.net, type=lxc} value=0 ]} {Instance:id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc Value:0xc008ebd5a8} B:{Var:B Labels:id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc Value:0xc008ebd5f0} C:{Var:C Labels:id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc Value:0xc008ebd648}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614092524s EvaluationString:[ var='A' labels={id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/139, instance=pve19.tri-coat.com:9221, job=pve, name=imapsync.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/202, instance=pve19.tri-coat.com:9221, job=pve, name=nolapro.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/202, instance=pve19.tri-coat.com:9221, job=pve, name=nolapro.tri-coat.com, type=lxc Value:0xc008ebd6d0} B:{Var:B Labels:id=lxc/202, instance=pve19.tri-coat.com:9221, job=pve, name=nolapro.tri-coat.com, type=lxc Value:0xc008ebd718} C:{Var:C Labels:id=lxc/202, instance=pve19.tri-coat.com:9221, job=pve, name=nolapro.tri-coat.com, type=lxc Value:0xc008ebd758}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614101244s EvaluationString:[ var='A' labels={id=lxc/202, instance=pve19.tri-coat.com:9221, job=pve, name=nolapro.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/202, instance=pve19.tri-coat.com:9221, job=pve, name=nolapro.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/202, instance=pve19.tri-coat.com:9221, job=pve, name=nolapro.tri-coat.com, type=lxc} value=0 ]} {Instance:id=lxc/205, instance=pve19.tri-coat.com:9221, job=pve, name=timetrex.tri-coat.com, type=lxc State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=lxc/205, 
instance=pve19.tri-coat.com:9221, job=pve, name=timetrex.tri-coat.com, type=lxc Value:0xc008ebd7e0} B:{Var:B Labels:id=lxc/205, instance=pve19.tri-coat.com:9221, job=pve, name=timetrex.tri-coat.com, type=lxc Value:0xc008ebd810} C:{Var:C Labels:id=lxc/205, instance=pve19.tri-coat.com:9221, job=pve, name=timetrex.tri-coat.com, type=lxc Value:0xc008ebd860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614132044s EvaluationString:[ var='A' labels={id=lxc/205, instance=pve19.tri-coat.com:9221, job=pve, name=timetrex.tri-coat.com, type=lxc} value=0 ], [ var='B' labels={id=lxc/205, instance=pve19.tri-coat.com:9221, job=pve, name=timetrex.tri-coat.com, type=lxc} value=0 ], [ var='C' labels={id=lxc/205, instance=pve19.tri-coat.com:9221, job=pve, name=timetrex.tri-coat.com, type=lxc} value=0 ]} {Instance:id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu Value:0xc008ebd9a0} B:{Var:B Labels:id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu Value:0xc008ebd8f8} C:{Var:C Labels:id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu Value:0xc008ebd950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614144915s EvaluationString:[ var='A' labels={id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu} value=9284.266666666668 ], [ var='B' labels={id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu} value=0.008854166666666668 ], [ var='C' labels={id=qemu/100, instance=pve19.tri-coat.com:9221, job=pve, name=Gateway, type=qemu} value=0 ]} {Instance:id=qemu/108, instance=pve19.tri-coat.com:9221, job=pve, name=MX.desktop.tri-coat.com, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/108, instance=pve19.tri-coat.com:9221, job=pve, name=MX.desktop.tri-coat.com, type=qemu Value:0xc008ebdab0} B:{Var:B Labels:id=qemu/108, instance=pve19.tri-coat.com:9221, job=pve, name=MX.desktop.tri-coat.com, type=qemu Value:0xc008ebda28} C:{Var:C Labels:id=qemu/108, instance=pve19.tri-coat.com:9221, job=pve, name=MX.desktop.tri-coat.com, type=qemu Value:0xc008ebda70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614161165s EvaluationString:[ var='A' labels={id=qemu/108, instance=pve19.tri-coat.com:9221, job=pve, name=MX.desktop.tri-coat.com, type=qemu} value=375.46666666666664 ], [ var='B' labels={id=qemu/108, instance=pve19.tri-coat.com:9221, job=pve, name=MX.desktop.tri-coat.com, type=qemu} value=0.00035807291666666664 ], [ var='C' labels={id=qemu/108, instance=pve19.tri-coat.com:9221, job=pve, name=MX.desktop.tri-coat.com, type=qemu} value=0 ]} {Instance:id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu Value:0xc008ebdbb8} B:{Var:B Labels:id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu Value:0xc008ebdc18} C:{Var:C Labels:id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu Value:0xc008ebdb48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614176845s EvaluationString:[ var='A' labels={id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu} value=49196.8 ], [ var='B' 
labels={id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu} value=0.046917724609375 ], [ var='C' labels={id=qemu/111, instance=pve19.tri-coat.com:9221, job=pve, name=willow.mwpol.net, type=qemu} value=0 ]} {Instance:id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu Value:0xc008ebdd20} B:{Var:B Labels:id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu Value:0xc008ebdca0} C:{Var:C Labels:id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu Value:0xc008ebdcd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614189685s EvaluationString:[ var='A' labels={id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu} value=4795.733333333334 ], [ var='B' labels={id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu} value=0.0045735677083333336 ], [ var='C' labels={id=qemu/117, instance=pve19.tri-coat.com:9221, job=pve, name=docker.tri-coat.com, type=qemu} value=0 ]} {Instance:id=qemu/121, instance=pve19.tri-coat.com:9221, job=pve, name=security1.mwpol.net, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/121, instance=pve19.tri-coat.com:9221, job=pve, name=security1.mwpol.net, type=qemu Value:0xc008ebdda8} B:{Var:B Labels:id=qemu/121, instance=pve19.tri-coat.com:9221, job=pve, name=security1.mwpol.net, type=qemu Value:0xc008ebddd8} C:{Var:C Labels:id=qemu/121, instance=pve19.tri-coat.com:9221, job=pve, name=security1.mwpol.net, type=qemu Value:0xc008ebde28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614199235s EvaluationString:[ var='A' labels={id=qemu/121, instance=pve19.tri-coat.com:9221, job=pve, name=security1.mwpol.net, type=qemu} value=1.3795733333333335e+06 ], [ var='B' labels={id=qemu/121, instance=pve19.tri-coat.com:9221, job=pve, name=security1.mwpol.net, type=qemu} value=1.3156636555989585 ], [ var='C' labels={id=qemu/121, instance=pve19.tri-coat.com:9221, job=pve, name=security1.mwpol.net, type=qemu} value=0 ]} {Instance:id=qemu/124, instance=pve19.tri-coat.com:9221, job=pve, name=security2.mwpol.net, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/124, instance=pve19.tri-coat.com:9221, job=pve, name=security2.mwpol.net, type=qemu Value:0xc008ebdea8} B:{Var:B Labels:id=qemu/124, instance=pve19.tri-coat.com:9221, job=pve, name=security2.mwpol.net, type=qemu Value:0xc008ebdef0} C:{Var:C Labels:id=qemu/124, instance=pve19.tri-coat.com:9221, job=pve, name=security2.mwpol.net, type=qemu Value:0xc008ebdf30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614209225s EvaluationString:[ var='A' labels={id=qemu/124, instance=pve19.tri-coat.com:9221, job=pve, name=security2.mwpol.net, type=qemu} value=1.0230464e+06 ], [ var='B' labels={id=qemu/124, instance=pve19.tri-coat.com:9221, job=pve, name=security2.mwpol.net, type=qemu} value=0.975653076171875 ], [ var='C' labels={id=qemu/124, instance=pve19.tri-coat.com:9221, job=pve, name=security2.mwpol.net, type=qemu} value=0 ]} {Instance:id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, name=philip.tri-coat.com, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, 
name=philip.tri-coat.com, type=qemu Value:0xc043cc6000} B:{Var:B Labels:id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, name=philip.tri-coat.com, type=qemu Value:0xc043cc6038} C:{Var:C Labels:id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, name=philip.tri-coat.com, type=qemu Value:0xc008ebdfb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614221045s EvaluationString:[ var='A' labels={id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, name=philip.tri-coat.com, type=qemu} value=13260.8 ], [ var='B' labels={id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, name=philip.tri-coat.com, type=qemu} value=0.012646484375 ], [ var='C' labels={id=qemu/126, instance=pve19.tri-coat.com:9221, job=pve, name=philip.tri-coat.com, type=qemu} value=0 ]} {Instance:id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu Value:0xc043cc60f8} B:{Var:B Labels:id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu Value:0xc043cc6158} C:{Var:C Labels:id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu Value:0xc043cc6190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614234405s EvaluationString:[ var='A' labels={id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu} value=37546.666666666664 ], [ var='B' labels={id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu} value=0.035807291666666664 ], [ var='C' labels={id=qemu/129, instance=pve19.tri-coat.com:9221, job=pve, name=opnsense.tri-coat.com, type=qemu} value=0 ]} {Instance:id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu Value:0xc043cc6288} B:{Var:B Labels:id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu Value:0xc043cc61f8} C:{Var:C Labels:id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu Value:0xc043cc6240}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614248446s EvaluationString:[ var='A' labels={id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu} value=31965.866666666665 ], [ var='B' labels={id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu} value=0.030485026041666665 ], [ var='C' labels={id=qemu/132, instance=pve19.tri-coat.com:9221, job=pve, name=gw-69.41.195.54.mwpol.net, type=qemu} value=0 ]} {Instance:id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu Value:0xc043cc6368} B:{Var:B Labels:id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu Value:0xc043cc6398} C:{Var:C Labels:id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu Value:0xc043cc63c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.614261266s EvaluationString:[ var='A' 
labels={id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu} value=58624 ], [ var='B' labels={id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu} value=0.055908203125 ], [ var='C' labels={id=qemu/136, instance=pve19.tri-coat.com:9221, job=pve, name=nextcloud.tri-coat.com, type=qemu} value=0 ]}]" duration=14.321962ms + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.616717205Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.616709242Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.616803463Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.616648424Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.616691562Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-09" t=2024-05-29T13:44:13.616828091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-07" t=2024-05-29T13:44:13.616599263Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=hive-tpa-07" t=2024-05-29T13:44:13.616584395Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.616188822Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.616266501Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=23997 slug=wheniwork t=2024-05-29T13:44:13.616131056Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=23997 slug=wheniwork instance= t=2024-05-29T13:44:13.616099624Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.615905509Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=23997 slug=wheniwork version=1 fingerprint=c5b0f5dd5edecfea attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.615974011Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[C0:{Var:C Labels: Value:} C1:{Var:C Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.61562413s EvaluationString:[ var='C0' metric='NoData' labels={} value=null ], [ var='C1' metric='NoData' labels={} value=null ]}]" duration=92.875133ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-za-06" t=2024-05-29T13:44:13.616081577Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=295631 slug=dapvizor t=2024-05-29T13:44:13.615876676Z level=debug msg="Saving alert states done" count=1 
max_state_save_concurrency=1 duration=16.404712ms + logger=ngalert.state.manager.persist user=609912 slug=wirestock t=2024-05-29T13:44:13.615864898Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.423866ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-za-05" t=2024-05-29T13:44:13.615900793Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=229187 slug=feeliters version=1 fingerprint=6d51ee578c333bde attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.615709383Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=zmxD7nink, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.615303023s EvaluationString:}]" duration=124.813015ms + level=debug ts=2024-05-29T13:44:13.61579223Z caller=remote_instance_store.go:51 user=314947 slug=h10n msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615744516Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615804883Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615759053Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615682452Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615707386Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615657578Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615663522Z caller=remote_instance_store.go:51 user=556147 slug=bettercloudholding msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615606116Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-za-04" t=2024-05-29T13:44:13.615598827Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.615468493Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.615299262Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=733461 slug=lattice instance= t=2024-05-29T13:44:13.615377254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=733461 slug=lattice instance= t=2024-05-29T13:44:13.615366325Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-za-02" t=2024-05-29T13:44:13.615221523Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-za-02" t=2024-05-29T13:44:13.615175254Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.61513856Z caller=client.go:80 msg="creating client for grafana instance" user=623546 addr=dns:///jcwardle-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.615016459Z 
caller=client.go:80 msg="creating client for grafana instance" user=529212 addr=dns:///jamesperry-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.615035376Z caller=client.go:80 msg="creating client for grafana instance" user=391105 addr=dns:///jamiekieranmartin-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.614994221Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=286641 slug=goanna + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.6149782Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=114492 slug=railsbank instance="QueueName=PROD-PLAY-RB_QUEUE_EVENT_STREAMING_DD-DLQ" t=2024-05-29T13:44:13.614958559Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:13.614922583Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-za-01" t=2024-05-29T13:44:13.614850081Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-gru-br-010" t=2024-05-29T13:44:13.614669726Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-gru-br-010" t=2024-05-29T13:44:13.614626073Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.614322298Z caller=client.go:80 msg="creating client for grafana instance" user=475146 addr=dns:///iuriimordovin-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.61431931Z caller=client.go:80 msg="creating client for grafana instance" user=374211 addr=dns:///isomer-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.612884222Z caller=ruler.go:522 msg="tenant is owned by this instance" user=508814 slug=focia groups=0 + level=debug ts=2024-05-29T13:44:13.612731303Z caller=ruler.go:522 msg="tenant is owned by this instance" user=510925 slug=freddo groups=0 + level=debug ts=2024-05-29T13:44:13.614154028Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.61394213Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.613642451Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-gru-br-005" t=2024-05-29T13:44:13.613614542Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.613555442Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.612029074Z caller=ruler.go:522 msg="tenant is owned by this instance" user=434892 slug=apexfsnzdev groups=55 + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:13.613496335Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + 
logger=ngalert.scheduler user=656459 slug=activeport version=106 fingerprint=0f8cb05fc960112d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.613385645Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613125961s EvaluationString:}]" duration=16.287549ms + logger=ngalert.scheduler user=55491 slug=demandbase version=1 fingerprint=808b953e305c4a2b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.613290041Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000120, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.613050421s EvaluationString:}]" duration=70.065479ms + level=debug ts=2024-05-29T13:44:13.61334499Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-gru-br-003" t=2024-05-29T13:44:13.613308365Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.613296655Z caller=client.go:80 msg="creating client for grafana instance" user=715436 addr=dns:///canerakar-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-gru-br-003" t=2024-05-29T13:44:13.613291988Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.613268955Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=543165 slug=bitgatecoa + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-gru-br-002" t=2024-05-29T13:44:13.613190114Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.613146811Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.613105741Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.61311599Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=h1p-gru-br-001" t=2024-05-29T13:44:13.613071207Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.612746049Z caller=remote_instance_store.go:51 user=451427 slug=rocketchat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:13.61277617Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.612665849Z caller=client.go:80 msg="creating client for grafana instance" user=521278 addr=dns:///canarysail-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=ffbd846cfa65991e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.61260814Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.612332009s EvaluationString:}]" duration=11.527784ms + level=debug ts=2024-05-29T13:44:13.612541072Z caller=remote_instance_store.go:51 
user=391538 slug=risknarrative msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.612553892Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.612186687Z caller=client.go:80 msg="creating client for grafana instance" user=449001 addr=dns:///imtiazahmedjesu-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.612141293Z caller=client.go:80 msg="creating client for grafana instance" user=552880 addr=dns:///imentu-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.612081465Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=546452 slug=daksr + level=info component=discovery ts=2024-05-29T13:44:13.612058384Z caller=client.go:80 msg="creating client for grafana instance" user=756802 addr=dns:///ickkou-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.611957066Z caller=ruler.go:522 msg="tenant is owned by this instance" user=498106 slug=georgepage groups=0 + level=debug ts=2024-05-29T13:44:13.611429013Z caller=ruler.go:522 msg="tenant is owned by this instance" user=289956 slug=edukita groups=2 + level=debug ts=2024-05-29T13:44:13.612379196Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.612415326Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.611890514Z caller=client.go:80 msg="creating client for grafana instance" user=311967 addr=dns:///hmuchaku-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=214309 slug=spenmo instance= t=2024-05-29T13:44:13.612315973Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-syd-au-001" t=2024-05-29T13:44:13.612391956Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.611774458Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=656531 slug=elcrow3d + level=debug ts=2024-05-29T13:44:13.611346767Z caller=ruler.go:522 msg="tenant is owned by this instance" user=648693 slug=freshtest groups=0 + level=warn ts=2024-05-29T13:44:13.611677993Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=445690 slug=finmo + level=debug ts=2024-05-29T13:44:13.611712254Z caller=ruler.go:522 msg="tenant is owned by this instance" user=494010 slug=dtlan groups=0 + level=info component=discovery ts=2024-05-29T13:44:13.61169889Z caller=client.go:80 msg="creating client for grafana instance" user=288955 addr=dns:///haydenkwelsh-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.611674764Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=659368 slug=fidelio + level=debug ts=2024-05-29T13:44:13.611329999Z caller=ruler.go:522 msg="tenant is owned by this instance" user=659368 slug=fidelio groups=0 + level=warn ts=2024-05-29T13:44:13.61161352Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=649955 slug=conall12345 + level=debug 
ts=2024-05-29T13:44:13.609772631Z caller=ruler.go:522 msg="tenant is owned by this instance" user=415341 slug=boxfish groups=1 + level=warn ts=2024-05-29T13:44:13.61152831Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=616641 slug=fliu104 + level=debug ts=2024-05-29T13:44:13.612167129Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.612148646Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance= t=2024-05-29T13:44:13.612129945Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.612069437Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.612083807Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-sin-ph-002" t=2024-05-29T13:44:13.611851653Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.611752019Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-sin-ph-002" t=2024-05-29T13:44:13.611836466Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.611700441Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.611637634Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.611491946Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.611472018Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.611431406Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:13.611394121Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-sin-in-002" t=2024-05-29T13:44:13.611421262Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.611251673Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-sin-id-001" t=2024-05-29T13:44:13.611076543Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.611078078Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=700451 slug=anaplantestnonprodau + level=debug ts=2024-05-29T13:44:13.611024573Z caller=ruler.go:522 msg="tenant is owned by this instance" user=700451 slug=anaplantestnonprodau groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-sin-gu-002" t=2024-05-29T13:44:13.610969222Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-sin-gu-002" 
t=2024-05-29T13:44:13.610956885Z level=debug msg="Setting next state" handler=resultNormal + level=warn ts=2024-05-29T13:44:13.610914163Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=611646 slug=dbagino + level=debug component=discovery ts=2024-05-29T13:44:13.610423005Z caller=retry.go:58 user=526479 msg="retrying grpc request" method=/remoteruler.rules.v1.RulesService/GetByRuleGroup attempt=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-sin-gu-001" t=2024-05-29T13:44:13.610833427Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=538355 slug=flogic t=2024-05-29T13:44:13.610737933Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=538355 slug=flogic instance="__name__=aws_ec2_cpucredit_balance_average, account_id=641264638977, dimension_InstanceId=i-0f37c3808f9678c80, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:ec2:ap-northeast-1:641264638977:instance/i-0f37c3808f9678c80, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" t=2024-05-29T13:44:13.610717761Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-per-au-002" t=2024-05-29T13:44:13.610706175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-per-au-002" t=2024-05-29T13:44:13.610694165Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.610628134Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.610486402Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=22115 slug=tiki t=2024-05-29T13:44:13.610591737Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.397843ms + level=debug ts=2024-05-29T13:44:13.610448612Z caller=ruler.go:522 msg="tenant is owned by this instance" user=381977 slug=firstpress groups=0 + level=info ts=2024-05-29T13:44:13.610398039Z caller=remote_image_capturer.go:61 user=698103 slug=vericast rule_org_id=1 rule_uid=ddfnr97wr9ibof dashboard=QHpy1KI4z panel=267 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.610270464Z caller=remote_instance_store.go:51 user=461798 slug=betfair msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.610299755Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-mel-au-002" t=2024-05-29T13:44:13.610097133Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-mel-au-001" t=2024-05-29T13:44:13.610006023Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-mel-au-001" t=2024-05-29T13:44:13.609995694Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.609951149Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=716631 
slug=sugatsune instance="__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok" t=2024-05-29T13:44:13.609843556Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.609854264Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-hnd-jp-005" t=2024-05-29T13:44:13.609883547Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.609675018Z caller=ruler.go:522 msg="tenant is owned by this instance" user=364718 slug=ericm groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-hnd-jp-004" t=2024-05-29T13:44:13.609768888Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-hnd-jp-004" t=2024-05-29T13:44:13.609758006Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.60969757Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=716631 slug=sugatsune version=1 fingerprint=0ab088de0ddd338f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.60954982Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=windows_service_status, agent_hostname=prod-app-01, instance=prod-app-01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=prod-app-01, instance=prod-app-01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok Value:0xc0023bd638} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=prod-app-01, instance=prod-app-01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok Value:0xc0023bd680} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=prod-app-01, instance=prod-app-01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok Value:0xc0023bd6c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60921969s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=prod-app-01, instance=prod-app-01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=prod-app-01, instance=prod-app-01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=prod-app-01, instance=prod-app-01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok} value=0 ]} {Instance:__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok Value:0xc0023bd758} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, 
status=ok Value:0xc0023bd7a0} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok Value:0xc0023bd7f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.60923962s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=sgst-qa-app01, instance=sgst-qa-app01:12345, job=integrations/windows_exporter, name=enableserverwildflyslave2, status=ok} value=0 ]}]" duration=6.973969ms + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.60965354Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.609612121Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:13.609615496Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-hnd-jp-003" t=2024-05-29T13:44:13.609606416Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.609524303Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.388416ms + level=debug ts=2024-05-29T13:44:13.609469234Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-hnd-jp-002" t=2024-05-29T13:44:13.609491117Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-hnd-jp-002" t=2024-05-29T13:44:13.609477554Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=18335 slug=semaphore t=2024-05-29T13:44:13.609435671Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=30.951034ms + level=debug ts=2024-05-29T13:44:13.609389609Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698103 slug=vericast instance= t=2024-05-29T13:44:13.609373046Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.609156366Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-hkg-hk-001" t=2024-05-29T13:44:13.60911966Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.609091222Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-tr-001" t=2024-05-29T13:44:13.608830978Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-ma-002" t=2024-05-29T13:44:13.60871504Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-ma-002" t=2024-05-29T13:44:13.608702105Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.608614592Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-ma-001" t=2024-05-29T13:44:13.608533651Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-lb-001" t=2024-05-29T13:44:13.608261619Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1" t=2024-05-29T13:44:13.608188857Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-it-002" t=2024-05-29T13:44:13.60814317Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.607878718Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-it-002" t=2024-05-29T13:44:13.60813017Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata" t=2024-05-29T13:44:13.608140614Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata" t=2024-05-29T13:44:13.60811488Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata" t=2024-05-29T13:44:13.60810343Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.607924669Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.607899548Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0" t=2024-05-29T13:44:13.608037685Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-it-001" t=2024-05-29T13:44:13.607975888Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2" t=2024-05-29T13:44:13.607936508Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=206107 slug=hydrolix instance="persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0" t=2024-05-29T13:44:13.607893805Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.607687452Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=206107 slug=hydrolix version=4 fingerprint=e6c139b877fd2fe6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.607555361Z level=debug msg="Alert rule evaluated" results="[{Instance:persistentvolume=pvc-051b8f56d83c49e4, persistentvolumeclaim=data-rabbitmq-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-051b8f56d83c49e4, persistentvolumeclaim=data-rabbitmq-0 Value:0xc00eaefa48} B:{Var:B Labels:persistentvolume=pvc-051b8f56d83c49e4, persistentvolumeclaim=data-rabbitmq-0 Value:0xc00eaefab0} C:{Var:C Labels:persistentvolume=pvc-051b8f56d83c49e4, persistentvolumeclaim=data-rabbitmq-0 Value:0xc00eaefab8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606764749s EvaluationString:[ var='A' labels={persistentvolume=pvc-051b8f56d83c49e4, persistentvolumeclaim=data-rabbitmq-0} value=0.0028578776393093025 ], [ var='B' labels={persistentvolume=pvc-051b8f56d83c49e4, persistentvolumeclaim=data-rabbitmq-0} value=0.0028578776393093025 ], [ var='C' labels={persistentvolume=pvc-051b8f56d83c49e4, persistentvolumeclaim=data-rabbitmq-0} value=0 ]} {Instance:persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0 Value:0xc00eaefb70} B:{Var:B Labels:persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0 Value:0xc00eaefb78} C:{Var:C Labels:persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0 Value:0xc00eaefb18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606779554s EvaluationString:[ var='A' labels={persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0} value=0.1529374283175475 ], [ var='B' labels={persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0} value=0.1529374283175475 ], [ var='C' labels={persistentvolume=pvc-0db712d9a13d435e, persistentvolumeclaim=data-redpanda-0} value=0 ]} {Instance:persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2 Value:0xc00eaefbc8} B:{Var:B Labels:persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2 Value:0xc00eaefc10} C:{Var:C Labels:persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2 Value:0xc00eaefc18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606787737s EvaluationString:[ var='A' labels={persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2} value=0.0025349425547414113 ], [ var='B' labels={persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2} value=0.0025349425547414113 ], [ var='C' labels={persistentvolume=pvc-2466b283ac9140d1, persistentvolumeclaim=data-zookeeper-2} value=0 ]} {Instance:persistentvolume=pvc-3699b27429d048f0, persistentvolumeclaim=data-rabbitmq-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-3699b27429d048f0, persistentvolumeclaim=data-rabbitmq-2 Value:0xc00eaefc68} B:{Var:B Labels:persistentvolume=pvc-3699b27429d048f0, persistentvolumeclaim=data-rabbitmq-2 Value:0xc00eaefcb0} C:{Var:C Labels:persistentvolume=pvc-3699b27429d048f0, persistentvolumeclaim=data-rabbitmq-2 Value:0xc00eaefcb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606799235s EvaluationString:[ var='A' labels={persistentvolume=pvc-3699b27429d048f0, persistentvolumeclaim=data-rabbitmq-2} value=0.0018276168604211935 ], [ var='B' labels={persistentvolume=pvc-3699b27429d048f0, persistentvolumeclaim=data-rabbitmq-2} value=0.0018276168604211935 ], [ var='C' labels={persistentvolume=pvc-3699b27429d048f0, persistentvolumeclaim=data-rabbitmq-2} value=0 ]} {Instance:persistentvolume=pvc-81fd57a9c095421d, persistentvolumeclaim=main-repo1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-81fd57a9c095421d, persistentvolumeclaim=main-repo1 Value:0xc00eaefd30} B:{Var:B Labels:persistentvolume=pvc-81fd57a9c095421d, persistentvolumeclaim=main-repo1 Value:0xc00eaefd38} C:{Var:C Labels:persistentvolume=pvc-81fd57a9c095421d, persistentvolumeclaim=main-repo1 Value:0xc00eaefd90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606808277s EvaluationString:[ var='A' labels={persistentvolume=pvc-81fd57a9c095421d, persistentvolumeclaim=main-repo1} value=0.008246911777304858 ], [ var='B' labels={persistentvolume=pvc-81fd57a9c095421d, persistentvolumeclaim=main-repo1} value=0.008246911777304858 ], [ var='C' labels={persistentvolume=pvc-81fd57a9c095421d, persistentvolumeclaim=main-repo1} value=0 ]} {Instance:persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0 Value:0xc00eaefdc0} B:{Var:B Labels:persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0 Value:0xc00eaefdc8} C:{Var:C Labels:persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0 Value:0xc00eaefdf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606816899s EvaluationString:[ var='A' labels={persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0} value=0.3010701065801697 ], [ var='B' labels={persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0} value=0.3010701065801697 ], [ var='C' labels={persistentvolume=pvc-8e4d4f45626b44c3, persistentvolumeclaim=data-prometheus-0} value=0 ]} {Instance:persistentvolume=pvc-8f325bc2923e4cba, persistentvolumeclaim=data-zookeeper-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-8f325bc2923e4cba, persistentvolumeclaim=data-zookeeper-0 Value:0xc00eaefe50} B:{Var:B Labels:persistentvolume=pvc-8f325bc2923e4cba, persistentvolumeclaim=data-zookeeper-0 Value:0xc00eaefe58} C:{Var:C Labels:persistentvolume=pvc-8f325bc2923e4cba, persistentvolumeclaim=data-zookeeper-0 Value:0xc00eaefea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606859435s EvaluationString:[ var='A' labels={persistentvolume=pvc-8f325bc2923e4cba, persistentvolumeclaim=data-zookeeper-0} value=0.0028085566445752975 ], [ var='B' labels={persistentvolume=pvc-8f325bc2923e4cba, persistentvolumeclaim=data-zookeeper-0} value=0.0028085566445752975 ], [ var='C' labels={persistentvolume=pvc-8f325bc2923e4cba, persistentvolumeclaim=data-zookeeper-0} value=0 ]} {Instance:persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata Value:0xc00eaefee0} B:{Var:B Labels:persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata Value:0xc00eaefee8} C:{Var:C Labels:persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata Value:0xc00eaeff10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606867621s EvaluationString:[ var='A' labels={persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata} value=0.059219790901024215 ], [ var='B' labels={persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata} value=0.059219790901024215 ], [ var='C' labels={persistentvolume=pvc-aa62c702bc634546, persistentvolumeclaim=main-main-qk54-pgdata} value=0 ]} {Instance:persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata Value:0xc00eaeff80} B:{Var:B Labels:persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata Value:0xc00eaeff50} C:{Var:C Labels:persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata Value:0xc00eaeff58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606876859s EvaluationString:[ var='A' labels={persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata} value=0.05889796285222671 ], [ var='B' labels={persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata} value=0.05889796285222671 ], [ var='C' labels={persistentvolume=pvc-aaa52bae2a4940d4, persistentvolumeclaim=main-main-68lc-pgdata} value=0 ]} {Instance:persistentvolume=pvc-aade62912f5a40d8, persistentvolumeclaim=data-rabbitmq-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-aade62912f5a40d8, persistentvolumeclaim=data-rabbitmq-1 Value:0xc00eaeffd0} B:{Var:B Labels:persistentvolume=pvc-aade62912f5a40d8, persistentvolumeclaim=data-rabbitmq-1 Value:0xc00eaeffd8} C:{Var:C Labels:persistentvolume=pvc-aade62912f5a40d8, persistentvolumeclaim=data-rabbitmq-1 Value:0xc010940030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606887956s EvaluationString:[ var='A' labels={persistentvolume=pvc-aade62912f5a40d8, persistentvolumeclaim=data-rabbitmq-1} value=0.0025599944885745566 ], [ var='B' labels={persistentvolume=pvc-aade62912f5a40d8, persistentvolumeclaim=data-rabbitmq-1} value=0.0025599944885745566 ], [ var='C' labels={persistentvolume=pvc-aade62912f5a40d8, persistentvolumeclaim=data-rabbitmq-1} value=0 ]} {Instance:persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1 Value:0xc010940088} B:{Var:B Labels:persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1 Value:0xc0109400d0} C:{Var:C Labels:persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1 Value:0xc010940080}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.606899695s EvaluationString:[ var='A' labels={persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1} value=0.0032731917298869178 ], [ var='B' labels={persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1} value=0.0032731917298869178 ], [ var='C' labels={persistentvolume=pvc-b33ccb9a715c4fb0, persistentvolumeclaim=data-zookeeper-1} value=0 ]}]" duration=16.674751ms
+ level=debug ts=2024-05-29T13:44:13.607704875Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.607628863Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:13.607620745Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.607659591Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-eg-002" t=2024-05-29T13:44:13.607609288Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=163215 slug=tripadvisor instance= t=2024-05-29T13:44:13.60759735Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-eg-002" t=2024-05-29T13:44:13.607594175Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.607398154Z caller=remote_instance_store.go:51 user=716527 slug=newpigqa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=716527 slug=newpigqa instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.607312933Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716527 slug=newpigqa instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.607304762Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-dz-002" t=2024-05-29T13:44:13.607356038Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.607243494Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-dz-001" t=2024-05-29T13:44:13.607250554Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-de-001" t=2024-05-29T13:44:13.606767184Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-fra-de-001" t=2024-05-29T13:44:13.606755392Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.606703797Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-tt-002" t=2024-05-29T13:44:13.606641083Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.606474132Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.606465348Z caller=remote_instance_store.go:51 user=248027 slug=mishp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.606321335Z caller=remote_instance_store.go:51 user=795224 slug=gannettdigital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=201790 slug=veedmo t=2024-05-29T13:44:13.606393406Z level=debug msg="Saving alert states done" count=10 max_state_save_concurrency=1 duration=338.230201ms
+ logger=ngalert.state.manager.persist user=248027 slug=mishp t=2024-05-29T13:44:13.606435703Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=248027 slug=mishp instance= t=2024-05-29T13:44:13.606419102Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-pr-002" t=2024-05-29T13:44:13.606354241Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-pr-001" t=2024-05-29T13:44:13.60625288Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-ky-001" t=2024-05-29T13:44:13.606025786Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.605891785Z caller=client.go:80 msg="creating client for grafana instance" user=701088 addr=dns:///bykovas-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.605815396Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.605695146Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.605624745Z caller=remote_instance_store.go:51 user=494481 slug=inspectiv msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-hn-001" t=2024-05-29T13:44:13.605594232Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-hn-001" t=2024-05-29T13:44:13.605577756Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=494481 slug=inspectiv t=2024-05-29T13:44:13.605513401Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.605398645Z caller=remote_instance_store.go:51 user=890273 slug=cmhusqnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.605356264Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.605304951Z caller=remote_instance_store.go:51 user=736975 slug=jetcomms msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-do-001" t=2024-05-29T13:44:13.605329786Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.60526316Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-dfw-cu-001" t=2024-05-29T13:44:13.60512795Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.605073377Z caller=ruler.go:522 msg="tenant is owned by this instance" user=615633 slug=arcticfarming groups=1
+ level=info component=discovery ts=2024-05-29T13:44:13.605029477Z caller=client.go:80 msg="creating client for grafana instance" user=744941 addr=dns:///buzzinga-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.605021692Z caller=remote_instance_store.go:51 user=557231 slug=lnrsusinsuranceprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.604961976Z caller=ruler.go:522 msg="tenant is owned by this instance" user=488985 slug=aestusparra groups=0
+ level=debug ts=2024-05-29T13:44:13.604881248Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.604720347Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.604774322Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-bne-au-002" t=2024-05-29T13:44:13.604646898Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-bne-au-001" t=2024-05-29T13:44:13.604486043Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.604304913Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-ams-nl-002" t=2024-05-29T13:44:13.604068924Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:13.603873466Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=717225 slug=autoflo
+ level=warn ts=2024-05-29T13:44:13.603867366Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=686771 slug=ashwanthtesteuwest1
+ level=info component=discovery ts=2024-05-29T13:44:13.603634363Z caller=client.go:80 msg="creating client for grafana instance" user=550705 addr=dns:///brightdemo-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gsl-adl-au-002" t=2024-05-29T13:44:13.603460847Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.603385909Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.603320589Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.603207018Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=glesys-osl-no-002" t=2024-05-29T13:44:13.603241256Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=glesys-osl-no-001" t=2024-05-29T13:44:13.603079247Z level=debug msg="Setting next state" handler=resultNormal
+ level=info component=discovery ts=2024-05-29T13:44:13.603140359Z caller=client.go:80 msg="creating client for grafana instance" user=505433 addr=dns:///brairlab-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.603203807Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.603074086Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.603067358Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=765074 slug=bananu7
+ level=debug ts=2024-05-29T13:44:13.60302762Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.602976157Z caller=ruler.go:522 msg="tenant is owned by this instance" user=765074 slug=bananu7 groups=0
+ level=debug ts=2024-05-29T13:44:13.603025385Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.602963969Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.602853703Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.602709385Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.602688072Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.602581554Z caller=ruler.go:522 msg="tenant is owned by this instance" user=767290 slug=automatiiondconstrat groups=0
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.602628239Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.602599864Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:13.602464955Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.663936ms
+ level=debug ts=2024-05-29T13:44:13.602232331Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.602162024Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=glesys-cph-dk-002" t=2024-05-29T13:44:13.602203078Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.602005733Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.601947866Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=glesys-arn-se-005" t=2024-05-29T13:44:13.601937961Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:13.601569544Z caller=client.go:80 msg="creating client for grafana instance" user=781975 addr=dns:///bonnierhealthcare-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.601881531Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=412141 slug=sharethrough instance="cloud_region=us-west-1" t=2024-05-29T13:44:13.601609412Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=412141 slug=sharethrough instance="cloud_region=us-west-1" t=2024-05-29T13:44:13.601600557Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager user=412141 slug=sharethrough instance="cloud_region=us-west-1" t=2024-05-29T13:44:13.601592562Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=glesys-arn-se-003" t=2024-05-29T13:44:13.601608991Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.601487563Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.601433178Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance"
+ level=info component=discovery ts=2024-05-29T13:44:13.601254441Z caller=client.go:80 msg="creating client for grafana instance" user=661756 addr=dns:///boluda-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=glesys-arn-se-001" t=2024-05-29T13:44:13.601254667Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.601258475Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.601251119Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:13.601213294Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-030" t=2024-05-29T13:44:13.601141537Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.601075212Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.600951638Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=682230 slug=andreim
+ level=info component=discovery ts=2024-05-29T13:44:13.600614435Z caller=client.go:80 msg="creating client for grafana instance" user=558826 addr=dns:///boardgamesio-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=info component=discovery ts=2024-05-29T13:44:13.595520787Z caller=client.go:80 msg="creating client for grafana instance" user=627059 addr=dns:///bemine-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.6006104Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.596264294Z caller=ruler.go:522 msg="tenant is owned by this instance" user=632736 slug=aqpanaciy groups=0
+ logger=ngalert.state.manager user=562267 slug=return instance="asset_id=alfen-2259944026, asset_name=LSPS1, device_id=xymr4d, errors=" t=2024-05-29T13:44:13.600592925Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.600533052Z caller=remote_instance_store.go:51 user=555280 slug=hipcreative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=562267 slug=return instance="asset_id=rolls-royce-ESSENT, asset_name=L2-Castor-offtaker43, device_id=mygxnd, errors=rrs_partial_operation" t=2024-05-29T13:44:13.600525933Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-025" t=2024-05-29T13:44:13.600516281Z level=debug msg="Keeping state" state=Normal
+ level=warn ts=2024-05-29T13:44:13.600462534Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=492225 slug=artmetering
+ logger=ngalert.state.manager.persist user=609912 slug=wirestock t=2024-05-29T13:44:13.600435742Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=555280 slug=hipcreative t=2024-05-29T13:44:13.600463538Z level=debug msg="Saving alert states" count=95 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.600338232Z caller=ruler.go:522 msg="tenant is owned by this instance" user=703936 slug=albertkhaliullin groups=0
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-024" t=2024-05-29T13:44:13.600361904Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-024" t=2024-05-29T13:44:13.600348641Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.600353469Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=609912 slug=wirestock t=2024-05-29T13:44:13.600334431Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.600275167Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ level=warn ts=2024-05-29T13:44:13.600244831Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=557128 slug=ahlers
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-023" t=2024-05-29T13:44:13.600218896Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:13.600183431Z caller=client.go:80 msg="creating client for grafana instance" user=521518 addr=dns:///bitshelf-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=info component=discovery ts=2024-05-29T13:44:13.599528025Z caller=client.go:80 msg="creating client for grafana instance" user=506965 addr=dns:///bitlane-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-022" t=2024-05-29T13:44:13.600067766Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-021" t=2024-05-29T13:44:13.599943996Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.599790589Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=webapp-staging, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=webapp-staging-5886bd975d-phk6q" t=2024-05-29T13:44:13.599691441Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-020" t=2024-05-29T13:44:13.599680213Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.599518455Z caller=remote_instance_store.go:51 user=295631 slug=dapvizor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-018" t=2024-05-29T13:44:13.599431649Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.599412132Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=295631 slug=dapvizor instance="datasource_uid=ioFV1Jn4z, ref_id=A" t=2024-05-29T13:44:13.599435543Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=295631 slug=dapvizor t=2024-05-29T13:44:13.599415703Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=webapp-prod, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=webapp-prod-59bdc598ff-tgnx4" t=2024-05-29T13:44:13.599399856Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-018" t=2024-05-29T13:44:13.599397856Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:13.59933539Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=316418 slug=workmotion version=1 fingerprint=f0e7f705d5d3f4c2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.59926354Z level=debug msg="Alert rule evaluated" results="[{Instance:ApiId=1mr10216z5, Method=--, Resource=/v1/payroll-funding, Stage=-- State:NoData Error: Results:map[] Values:map[B:{Var:B Labels:ApiId=1mr10216z5, Method=--, Resource=/v1/payroll-funding, Stage=-- Value:} C:{Var:C Labels:ApiId=1mr10216z5, Method=--, Resource=/v1/payroll-funding, Stage=-- Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.598839751s EvaluationString:[ var='B' labels={ApiId=1mr10216z5, Method=--, Resource=/v1/payroll-funding, Stage=--} value=null ], [ var='C' labels={ApiId=1mr10216z5, Method=--, Resource=/v1/payroll-funding, Stage=--} value=null ]}]" duration=32.429895ms
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=webapp-prod, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=webapp-prod-59bdc598ff-jmgwn" t=2024-05-29T13:44:13.599309678Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-016" t=2024-05-29T13:44:13.599181417Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-016" t=2024-05-29T13:44:13.599153073Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-015" t=2024-05-29T13:44:13.599034538Z level=debug msg="Setting next state" handler=resultNormal
+ level=warn ts=2024-05-29T13:44:13.598932419Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=630368 slug=ashleygeeknz
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=sync-appointment, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=etl, pod=sync-appointment-6794fbcc58-q9gw6" t=2024-05-29T13:44:13.598919858Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=stream, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=convoy-system, pod=convoy-stream-86d5866499-wpf8m" t=2024-05-29T13:44:13.598776512Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=warn ts=2024-05-29T13:44:13.598075911Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=521697 slug=adhosttest
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-013" t=2024-05-29T13:44:13.59869509Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.59798891Z caller=ruler.go:522 msg="tenant is owned by this instance" user=521697 slug=adhosttest groups=0
+ logger=ngalert.state.manager user=815713 slug=returnstaging instance="datasource_uid=timescale_read_only, ref_id=A" t=2024-05-29T13:44:13.598658808Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=spib-elixir-staging, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=spib-elixir-staging-5d4c449c48-tsxth" t=2024-05-29T13:44:13.598642363Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=spib-elixir-staging, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=spib-elixir-staging-5d4c449c48-tsxth" t=2024-05-29T13:44:13.598615845Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.598526668Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-012" t=2024-05-29T13:44:13.598548285Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-012" t=2024-05-29T13:44:13.598536056Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=spib-elixir-prod, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=spib-elixir-prod-7b4c6659b7-tzkk5" t=2024-05-29T13:44:13.598527986Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.598388581Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.598270525Z caller=remote_instance_store.go:51 user=796993 slug=marketops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=spib-elixir-prod, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=spib-elixir-prod-676bb689b5-vrp5m" t=2024-05-29T13:44:13.598311659Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-010" t=2024-05-29T13:44:13.598273779Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.598228314Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.071083ms
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=server, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=convoy-system, pod=convoy-server-578df9499f-pxd25" t=2024-05-29T13:44:13.598083515Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=server, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=convoy-system, pod=convoy-server-578df9499f-pxd25" t=2024-05-29T13:44:13.598070963Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=scheduler, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=convoy-system, pod=convoy-scheduler-7dd4bb59b9-qvsfs" t=2024-05-29T13:44:13.597996172Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-008" t=2024-05-29T13:44:13.597948863Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=redis, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=redis-system, pod=redis-replicas-2" t=2024-05-29T13:44:13.597906373Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.597868745Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=350346 slug=restake instance="datasource_uid=grafanacloud-prom, ref_id=A,C" t=2024-05-29T13:44:13.597813022Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=350346 slug=restake t=2024-05-29T13:44:13.597766536Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.597607665Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.597460765Z caller=remote_instance_store.go:51 user=320906 slug=techcyte msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.597438153Z caller=remote_instance_store.go:51 user=400599 slug=unionai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-007" t=2024-05-29T13:44:13.597379473Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=redis, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=convoy-system, pod=redis-master-0" t=2024-05-29T13:44:13.597404338Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.597237339Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.597169371Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:13.597165044Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.scheduler user=114286 slug=enverus version=21 fingerprint=8715f7fe428ff5fe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.596990494Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=windows_service_state, instance=prt-fileserver-1, job=integrations/windows_exporter, name=tomcat10, state=running State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_state, instance=prt-fileserver-1, job=integrations/windows_exporter, name=tomcat10, state=running Value:0xc02ca4d818} B:{Var:B Labels:__name__=windows_service_state, instance=prt-fileserver-1, job=integrations/windows_exporter, name=tomcat10, state=running Value:0xc02ca4d870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.596632339s EvaluationString:[ var='A' labels={__name__=windows_service_state, instance=prt-fileserver-1, job=integrations/windows_exporter, name=tomcat10, state=running} value=1 ], [ var='B' labels={__name__=windows_service_state, instance=prt-fileserver-1, job=integrations/windows_exporter, name=tomcat10, state=running} value=0 ]}]" duration=32.745815ms
+ level=debug ts=2024-05-29T13:44:13.596987652Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:13.596975552Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.250653ms
+ level=debug ts=2024-05-29T13:44:13.596835454Z caller=remote_instance_store.go:51 user=397201 slug=zultys msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.596924117Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=59.321426ms
+ logger=ngalert.state.manager user=397201 slug=zultys instance="exported_instance=smsgate2" t=2024-05-29T13:44:13.596782159Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-005" t=2024-05-29T13:44:13.596852351Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=397201 slug=zultys version=8 fingerprint=797c316c3d73782b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.596677833Z level=debug msg="Alert rule evaluated" results="[{Instance:exported_instance=smsgate2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:exported_instance=smsgate2 Value:0xc004d50d80} B:{Var:B Labels:exported_instance=smsgate2 Value:0xc004d50d90} C:{Var:C Labels:exported_instance=smsgate2 Value:0xc004d50d70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.596260725s EvaluationString:[ var='A' labels={exported_instance=smsgate2} value=63.74137931034483 ], [ var='B' labels={exported_instance=smsgate2} value=63.74137931034483 ], [ var='C' labels={exported_instance=smsgate2} value=0 ]}]" duration=120.40945ms
+ level=debug ts=2024-05-29T13:44:13.596528116Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=444725 slug=devnextgen instance= t=2024-05-29T13:44:13.596544791Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.596505463Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.257908ms
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=gp-defects-prod, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=spib, pod=gp-defects-prod-28608240-mmbj6" t=2024-05-29T13:44:13.596531365Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.59645659Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.596269154Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-ord-us-003" t=2024-05-29T13:44:13.596353664Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=245291 slug=pismo version=1 fingerprint=a9248c46c4cf293c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.596268354Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.595947989s EvaluationString:}]" duration=373.972863ms
+ level=debug ts=2024-05-29T13:44:13.596276695Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.596223087Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=agent, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=etl, pod=prefect-agent-bbdc47bb9-ksqqx" t=2024-05-29T13:44:13.596168308Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.595971316Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.103699ms
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=agent, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=etl, pod=prefect-agent-759b799dcf-68mj4" t=2024-05-29T13:44:13.596053327Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.595869638Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.595738806Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=agent, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=etl, pod=prefect-agent-5879797b9f-76lx5" t=2024-05-29T13:44:13.59574708Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=agent, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=etl, pod=prefect-agent-5879797b9f-76lx5" t=2024-05-29T13:44:13.595738559Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=agent, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=etl, pod=prefect-agent-5879797b9f-6cslb" t=2024-05-29T13:44:13.59565518Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=acmesolver, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=rabbitmq-system, pod=cm-acme-http-solver-znrvd" t=2024-05-29T13:44:13.595573339Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.595477486Z caller=ruler.go:522 msg="tenant is owned by this instance" user=532085 slug=alexbxz groups=0
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=acmesolver, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=cm-acme-http-solver-lg2h9" t=2024-05-29T13:44:13.595392728Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=acmesolver, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=cm-acme-http-solver-lg2h9" t=2024-05-29T13:44:13.59537661Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-068" t=2024-05-29T13:44:13.595367156Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.595294507Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-068" t=2024-05-29T13:44:13.595299895Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=hip-af-do-k8s, container=acmesolver, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=cm-acme-http-solver-gvnfz" t=2024-05-29T13:44:13.595261804Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=523906 slug=cyberark t=2024-05-29T13:44:13.595222036Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=523906 slug=cyberark instance="ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster" t=2024-05-29T13:44:13.595208432Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=811998 slug=pietsch91 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.595113639Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=811998 slug=pietsch91 t=2024-05-29T13:44:13.595098839Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=1x3mYGa7z, ref_id=A" t=2024-05-29T13:44:13.595090432Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=web-utility-plugin-checker-backend, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=web-utility-plugin-checker-backend-68598f898d-86jtc, uid=528f419c-08c4-4bc6-9585-273faf38fd15" t=2024-05-29T13:44:13.595148593Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.595014282Z caller=remote_instance_store.go:51 user=237629 slug=ocrolus msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=20177 slug=paddledash instance="Component=checkout-service, SLI=APILatencyPaymentEndpoints" t=2024-05-29T13:44:13.595054991Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:13.594986501Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=237629 slug=ocrolus instance= t=2024-05-29T13:44:13.594975435Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=237629 slug=ocrolus instance= t=2024-05-29T13:44:13.594968867Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=20177 slug=paddledash t=2024-05-29T13:44:13.594990107Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=523906 slug=cyberark t=2024-05-29T13:44:13.594996905Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:13.59483532Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.594918067Z caller=remote_instance_store.go:51 user=472647 slug=planet msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.5948686Z caller=remote_image_capturer.go:33 user=555280 slug=hipcreative rule_org_id=1 rule_uid=c79296bb-5240-4020-b222-22442749d0a1 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard"
+ logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:13.594898975Z level=debug msg="Deleting alert states" count=1
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=web-utility-backend, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=web-utility-backend-7977cc7d4f-rxh55, uid=53bd57d0-40ce-40ae-8af8-e9ab194b8b0e" t=2024-05-29T13:44:13.59485144Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:13.594807259Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=307381 slug=kambitaskforce version=2 fingerprint=fec2d85e3aa0e820 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.594728669Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=1x3mYGa7z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.594316401s EvaluationString:}]" duration=41.208173ms
+ logger=ngalert.scheduler user=111653 slug=theassociationmxp version=1 fingerprint=b458ec4fe2d2cb44 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.594702621Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.594378991s EvaluationString:}]" duration=63.428888ms
+ logger=ngalert.state.manager user=608555 slug=ias t=2024-05-29T13:44:13.594816472Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=web-utility-backend, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=web-utility-backend-7977cc7d4f-rxh55, uid=53bd57d0-40ce-40ae-8af8-e9ab194b8b0e" t=2024-05-29T13:44:13.594799791Z level=debug msg="Setting next state" handler=resultAlerting
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-066" t=2024-05-29T13:44:13.594831927Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=608555 slug=ias version=24 fingerprint=f5a48ddb8598d4ba attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.594744685Z level=debug msg="Alert rule evaluated" results="[{Instance:Series=query0cd16e87d34e4a7cae975e229f69619d, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Series=query0cd16e87d34e4a7cae975e229f69619d, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da Value:0xc08edcd810} C:{Var:C Labels:Series=query0cd16e87d34e4a7cae975e229f69619d, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da Value:0xc08edcd850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.594349191s EvaluationString:[ var='B' labels={Series=query0cd16e87d34e4a7cae975e229f69619d, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da} value=0.2354734210526316 ], [ var='C' labels={Series=query0cd16e87d34e4a7cae975e229f69619d, TargetGroup=targetgroup/eng-ct-zh-zt-ml/7540ca45763622da} value=0 ]}]" duration=40.993489ms
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-066" t=2024-05-29T13:44:13.59476284Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.594628984Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-twilio-usages, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-twilio-usages-28610320-nwz24, uid=dc5e5f6d-4685-4692-8fef-44f5a7df2921" t=2024-05-29T13:44:13.594529644Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-065" t=2024-05-29T13:44:13.594578105Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-065" t=2024-05-29T13:44:13.594506187Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.594343482Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-twilio-usages, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-twilio-usages-28610315-bqlqn, uid=2b7e5522-1f20-4b92-a665-b3423dc09847" t=2024-05-29T13:44:13.594396811Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.594228594Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-064" t=2024-05-29T13:44:13.594281933Z level=debug msg="Keeping state" state=Normal
+ level=info component=discovery ts=2024-05-29T13:44:13.594090673Z caller=client.go:80 msg="creating client for grafana instance" user=499556 addr=dns:///beamzero-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+ level=debug ts=2024-05-29T13:44:13.594075447Z caller=remote_instance_store.go:51 user=538355 slug=flogic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=wes.sea, group=directory, instance=directory-02.wes.sea, origin=volterra-infra-vm" t=2024-05-29T13:44:13.59410409Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=538355 slug=flogic instance="datasource_uid=bfdc80c2-01d3-481f-8d92-756e545e3bd1, ref_id=A" t=2024-05-29T13:44:13.59397561Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=538355 slug=flogic t=2024-05-29T13:44:13.593943886Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=538355 slug=flogic version=12 fingerprint=e0de1ceb899fff93 attempt=1 now=2024-05-29T13:44:00Z t=2024-05-29T13:44:13.59386894Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=bfdc80c2-01d3-481f-8d92-756e545e3bd1, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:00 +0000 UTC EvaluationDuration:13.593480475s EvaluationString:}]" duration=4.930382515s
+ level=warn ts=2024-05-29T13:44:13.593924572Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=559754 slug=alopem
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-backend-update-twilio-subaccnt-auth, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-backend-update-twilio-subaccnt-auth-28616400-zpqtk, uid=58834848-19a6-4224-9c0d-f9b94338da77" t=2024-05-29T13:44:13.593943036Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-backend-update-twilio-subaccnt-auth, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-backend-update-twilio-subaccnt-auth-28616400-zpqtk, uid=58834848-19a6-4224-9c0d-f9b94338da77" t=2024-05-29T13:44:13.593925265Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ty8.tky, group=directory, instance=directory-01.ty8.tky, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593931665Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.59372777Z caller=ruler.go:522 msg="tenant is owned by this instance" user=559754 slug=alopem groups=0
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-backend-update-twilio-subaccnt-auth, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-backend-update-twilio-subaccnt-auth-28616280-d746v, uid=a371ea64-24d9-473c-b09d-541f717aeed0" t=2024-05-29T13:44:13.593773371Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sy5.syd, group=directory, instance=directory-02.sy5.syd, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593734824Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.593742825Z caller=remote_instance_store.go:51 user=633335 slug=promqlworkshop msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-backend-update-twilio-subaccnt-auth, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-backend-update-twilio-subaccnt-auth-28616160-rjqld, uid=3f984ce4-dbcf-4bf8-a389-cfe4d8761024" t=2024-05-29T13:44:13.593663391Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=directory, instance=directory-02.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593631299Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=directory, instance=directory-01.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593585437Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sto6.sto, group=directory, instance=directory-02.sto6.sto, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593551316Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.593546538Z caller=remote_instance_store.go:51 user=788474 slug=elisasre msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=788474 slug=elisasre instance="__name__=probe_success, cluster=sre-ci.k8s.local, component=icc-dev, instance=https://10.222.158.13, monitor=monitor-477, namespace=health, region=sdcv3, target=https://10.222.158.13" t=2024-05-29T13:44:13.593494417Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-backend-retry-ghl-opportunity, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-backend-retry-ghl-opportunity-28616161-4glbz, uid=7ed2a5bf-3955-40fa-b007-d14a5df1ad89" t=2024-05-29T13:44:13.593440478Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593357383Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sp4.sao, group=directory, instance=directory-01.sp4.sao, origin=volterra-infra-vm" t=2024-05-29T13:44:13.59334679Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=secretshop-backend-retry-ghl-opportunity, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=secretshop-backend-retry-ghl-opportunity-28615921-wbl8f, uid=dc11a883-b17e-4439-a57d-3af5c3875a40" t=2024-05-29T13:44:13.593345834Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-061" t=2024-05-29T13:44:13.593353984Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, group=directory, instance=directory-02.sif.che, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593325078Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593290227Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, group=directory, instance=directory-01.sif.che, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593279216Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, group=directory, instance=directory-02.sg3.sin, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593255354Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.593141755Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling
SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sg3.sin, group=directory, instance=directory-01.sg3.sin, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593203267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, group=directory, instance=directory-02.pa4.par, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593165014Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593142777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, group=directory, instance=directory-01.pa4.par, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593130268Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.593032854Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-060" t=2024-05-29T13:44:13.593077109Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=schema-frontend, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=schema-frontend-578fb69f85-kzqjc, uid=a9eb8b73-e4cd-44d6-b772-e15b6034af01" t=2024-05-29T13:44:13.5930521Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=491157 slug=prd01wr instance= t=2024-05-29T13:44:13.593002216Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, group=directory, instance=directory-01.pa2.par, origin=volterra-infra-vm" t=2024-05-29T13:44:13.593040588Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-059" t=2024-05-29T13:44:13.592952607Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=schema-backend, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=schema-backend-7fcf6ffb89-bnlkl, uid=59c0ec2f-7bc3-4275-b58f-3e3dd9aab39f" t=2024-05-29T13:44:13.592976361Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.592855562Z caller=ruler.go:522 msg="tenant is owned by this instance" user=538356 slug=agrides groups=0 + logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=a5ee8f50d570f714 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.592879801Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[D:{Var:D Labels: Value:} E:{Var:E Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.592416077s EvaluationString:[ var='D' labels={} value=null ], [ var='E' labels={} value=null ]}]" duration=122.22225ms + level=debug 
ts=2024-05-29T13:44:13.59267606Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=os1.osa, group=directory, instance=directory-01.os1.osa, origin=volterra-infra-vm" t=2024-05-29T13:44:13.592724208Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-057" t=2024-05-29T13:44:13.592687778Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-057" t=2024-05-29T13:44:13.592676556Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.592564214Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ny2.nyc, group=directory, instance=directory-02.ny2.nyc, origin=volterra-infra-vm" t=2024-05-29T13:44:13.59259225Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-pull-ghl-opportunity, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-pull-ghl-opportunity-28615980-jqnkn, uid=5bf1b1cd-7460-44be-8c1a-1eec772ae78a" t=2024-05-29T13:44:13.592511452Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.592402857Z caller=client.go:80 msg="creating client for grafana instance" user=655803 addr=dns:///bartrijnders14grafana-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-054" t=2024-05-29T13:44:13.592288639Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-pull-ghl-opportunity, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-pull-ghl-opportunity-28613100-fq4m4, uid=ebcf9ece-a07b-4475-9cc1-bfe763d5658e" t=2024-05-29T13:44:13.59230058Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.592165913Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.592163586Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm" t=2024-05-29T13:44:13.592186284Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, group=directory, instance=directory-02.mtl7.mon, origin=volterra-infra-vm" t=2024-05-29T13:44:13.59214869Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, group=directory, instance=directory-01.mtl7.mon, origin=volterra-infra-vm" 
t=2024-05-29T13:44:13.59211017Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, group=directory, instance=directory-01.me1.mel, origin=volterra-infra-vm" t=2024-05-29T13:44:13.592054419Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-generate-daily-reports-script, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-generate-daily-reports-script-28614675-fj2qg, uid=deb41225-2273-4d6f-bfb8-7eeaffe37ce1" t=2024-05-29T13:44:13.591971367Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-shipments-backfill" t=2024-05-29T13:44:13.591964264Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pi-dx-shipments-backfill" t=2024-05-29T13:44:13.59195911Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-collection-statuses-backfill" t=2024-05-29T13:44:13.591883209Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=pcm-dx-collection-statuses-backfill" t=2024-05-29T13:44:13.591876003Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.591833352Z caller=ruler.go:522 msg="tenant is owned by this instance" user=729661 slug=airbusatlanticval groups=0 + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, group=directory, instance=directory-02.mb2.mum, origin=volterra-infra-vm" t=2024-05-29T13:44:13.591867011Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=475799 slug=dpdcz instance="consumer_name=il-bf-parcel-events" t=2024-05-29T13:44:13.591827306Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, group=directory, instance=directory-01.mb2.mum, origin=volterra-infra-vm" t=2024-05-29T13:44:13.591816273Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-generate-ad-data-ghl-opp-report, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-generate-ad-data-ghl-opp-report-28616405-7j65z, uid=0ca3f56b-7ffc-4c04-8354-f5ba6bed1bcd" t=2024-05-29T13:44:13.591739282Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.591744887Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-050" t=2024-05-29T13:44:13.591757629Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-generate-ad-data-ghl-opp-report, 
instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-generate-ad-data-ghl-opp-report-28616405-7j65z, uid=0ca3f56b-7ffc-4c04-8354-f5ba6bed1bcd" t=2024-05-29T13:44:13.591729982Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, group=directory, instance=directory-01.ls1.lis, origin=volterra-infra-vm" t=2024-05-29T13:44:13.591737434Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm" t=2024-05-29T13:44:13.591705626Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ld6.lon, group=directory, instance=directory-02.ld6.lon, origin=volterra-infra-vm" t=2024-05-29T13:44:13.591694924Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-generate-ad-data-ghl-opp-report, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-generate-ad-data-ghl-opp-report-28616045-h9287, uid=7a4b6bdd-67a2-4037-addf-1cec264e79dc" t=2024-05-29T13:44:13.591650521Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.591612619Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-generate-ad-data-ghl-opp-report, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-generate-ad-data-ghl-opp-report-28616045-h9287, uid=7a4b6bdd-67a2-4037-addf-1cec264e79dc" t=2024-05-29T13:44:13.591638403Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=hk2.hkg, group=directory, instance=directory-02.hk2.hkg, origin=volterra-infra-vm" t=2024-05-29T13:44:13.591638959Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=fr4.fra, group=directory, instance=directory-02.fr4.fra, origin=volterra-infra-vm" t=2024-05-29T13:44:13.591558181Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.591432127Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend-generate-ad-data-ghl-opp-report, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-generate-ad-data-ghl-opp-report-28615685-xccbp, uid=81ae6878-64f9-4f42-b729-f7e66cfdcb88" t=2024-05-29T13:44:13.591498654Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-048" t=2024-05-29T13:44:13.591478441Z level=debug 
msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.591440414Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=400599 slug=unionai t=2024-05-29T13:44:13.591406995Z level=debug msg="Saving alert states" count=45 max_state_save_concurrency=1 + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-utt-mgdp-prod-us-east-2-postgres-primary-xq49-0" t=2024-05-29T13:44:13.591379297Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.591250746Z caller=ruler.go:522 msg="tenant is owned by this instance" user=501513 slug=amakuru groups=2 + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=report-backend, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=report-backend-576df46c54-fcxsl, uid=8a926a67-7d8e-4041-9eb0-ba1f8493b38e" t=2024-05-29T13:44:13.591220502Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.591113024Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-ssi-postgres-primary-3473-0" t=2024-05-29T13:44:13.591120097Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-serverless-preview-postgres-primary-unj2-0" t=2024-05-29T13:44:13.591094997Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=533463 slug=lacewallet t=2024-05-29T13:44:13.591030733Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=62.446629ms + logger=ngalert.state.manager user=415003 slug=salaryfinance instance="agent_hostname=PROD-SAL-VMH-WEB09, instance=PROD-SAL-VMH-WEB09:12345, job=integrations/windows_exporter" t=2024-05-29T13:44:13.590898857Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.590801785Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.590768915Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-043" t=2024-05-29T13:44:13.590757584Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.590700075Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.590630905Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-042" t=2024-05-29T13:44:13.590587594Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-sample-tenant-postgres-f1kl-0" t=2024-05-29T13:44:13.590486218Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-041" t=2024-05-29T13:44:13.590490175Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager.persist user=453308 slug=hyperzodprod t=2024-05-29T13:44:13.590429971Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-sample-tenant-postgres-f1kl-0" t=2024-05-29T13:44:13.590419479Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=453308 slug=hyperzodprod instance= t=2024-05-29T13:44:13.590414292Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-porch-postgres-nrfc-0" t=2024-05-29T13:44:13.590357791Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.590353604Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=local-backend-process-request, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=local-backend-process-request-28611254-jkhtr, uid=c392aed9-5e1b-4810-9844-7b8c3b1dccd6" t=2024-05-29T13:44:13.590377352Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.590350498Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=453308 slug=hyperzodprod t=2024-05-29T13:44:13.590231085Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=local-backend-get-queued-scan-image, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=local-backend-get-queued-scan-image-28613188-gwnv7, uid=59964643-c614-429b-a308-9fad90b2e990" t=2024-05-29T13:44:13.590281969Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=local-backend-get-queued-scan-image, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=local-backend-get-queued-scan-image-28613188-gwnv7, uid=59964643-c614-429b-a308-9fad90b2e990" t=2024-05-29T13:44:13.590269413Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.590019635Z caller=ruler.go:522 msg="tenant is owned by this instance" user=319971 slug=aldisouthdev groups=1 + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-organizations-postgres-5t6g-0" t=2024-05-29T13:44:13.590005003Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dx1.dxb, group=directory, instance=directory-01.dx1.dxb, origin=volterra-infra-vm" t=2024-05-29T13:44:13.590135757Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-037" t=2024-05-29T13:44:13.589933215Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc 
instance="datacenter=dc12.ash, group=directory, instance=directory-01.dc12.ash, origin=volterra-infra-vm" t=2024-05-29T13:44:13.589997926Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=328300 slug=workable t=2024-05-29T13:44:13.589948744Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-multicluster-example-postgres-primary-0" t=2024-05-29T13:44:13.589970902Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm" t=2024-05-29T13:44:13.58992863Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dal3.dal, group=directory, instance=directory-02.dal3.dal, origin=volterra-infra-vm" t=2024-05-29T13:44:13.589897118Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=196413 slug=form3production t=2024-05-29T13:44:13.589717088Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.308349ms + level=debug ts=2024-05-29T13:44:13.589689908Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-msat-test-postgres-primary-0" t=2024-05-29T13:44:13.589655136Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm" t=2024-05-29T13:44:13.589758018Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, group=directory, instance=directory-02.ams9.ams, origin=volterra-infra-vm" t=2024-05-29T13:44:13.589734176Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ams9.ams, group=directory, instance=directory-01.ams9.ams, origin=volterra-infra-vm" t=2024-05-29T13:44:13.589649414Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-methanesat-postgres-primary-0" t=2024-05-29T13:44:13.589494175Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=local-backend-geogrid-scans, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=local-backend-geogrid-scans-28611650-78jt7, uid=38cbe71e-02c8-4a8d-ac4e-81866afaccdc" t=2024-05-29T13:44:13.589708089Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-035" t=2024-05-29T13:44:13.589694556Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.589568264Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.589508919Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-034" t=2024-05-29T13:44:13.589558698Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-itmethods-postgres-primary-yuu7-0" t=2024-05-29T13:44:13.58942343Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-infra-tests-postgres-primary-0" t=2024-05-29T13:44:13.58939779Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.589481999Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-executions-postgres-4ddk-0" t=2024-05-29T13:44:13.589254774Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-executions-postgres-4ddk-0" t=2024-05-29T13:44:13.589248329Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=local-backend-geogrid-scans, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=local-backend-geogrid-scans-28611647-6d86g, uid=b1704eb4-79b0-4177-8ecd-e68d84ffd7a9" t=2024-05-29T13:44:13.58927243Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-dogfood-postgres-kyp8-0" t=2024-05-29T13:44:13.589231562Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.58916375Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-dogfood-postgres-kyp8-0" t=2024-05-29T13:44:13.589223589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-031" t=2024-05-29T13:44:13.589196376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=lemonade-products, cluster=lmnd-production-us-east-1, container=kube-state-metrics, deployment=lemonade-products, endpoint=http, instance=10.16.27.82:8080, job=kube-state-metrics, namespace=production, pod=kube-state-metrics-6c795d5489-v5txw, region=us-east-1, service=kube-state-metrics, stage=production" t=2024-05-29T13:44:13.589174185Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-demo-postgres-7v0d-0" t=2024-05-29T13:44:13.589179779Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.589055983Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=local-backend-consumers, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=local-backend-consumers-9b86f95dc-n49jk, 
uid=49416c23-c13c-4b0b-b689-3cbb87fe2b77" t=2024-05-29T13:44:13.589127284Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-030" t=2024-05-29T13:44:13.589066883Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.589041698Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-costs-test-postgres-primary-0" t=2024-05-29T13:44:13.589051779Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-cluster-postgres-tm7o-0" t=2024-05-29T13:44:13.589023379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.588957508Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-cluster-postgres-81q7-0" t=2024-05-29T13:44:13.588932733Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-029" t=2024-05-29T13:44:13.588941125Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-authorizer-postgres-1mh4-0" t=2024-05-29T13:44:13.588865375Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=headless-chrome-backend-v2, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=headless-chrome-backend-v2-ff4f8f777-q7hwj, uid=697aeffb-ba5d-4586-ab51-acc4d1735221" t=2024-05-29T13:44:13.588834754Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-artifacts-test-postgres-primary-0" t=2024-05-29T13:44:13.588797232Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-artifacts-test-postgres-primary-0" t=2024-05-29T13:44:13.588787608Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=400599 slug=unionai instance="DBInstanceIdentifier=opta-artifacts-postgres-primary-oqhn-0" t=2024-05-29T13:44:13.58875991Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-027" t=2024-05-29T13:44:13.588721025Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=A" t=2024-05-29T13:44:13.588670225Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=headless-chrome-backend, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=headless-chrome-backend-75d88d8769-4zlw5, uid=e06741f3-b116-4375-80f9-5434f0bee5c8" t=2024-05-29T13:44:13.588659258Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=cubestore, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=cubestore-0, uid=23022e62-4c74-40e7-99a2-75976889b186" t=2024-05-29T13:44:13.588559837Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-026" t=2024-05-29T13:44:13.588567123Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:13.58836167Z level=debug msg="Saving alert states" count=8 max_state_save_concurrency=1 + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:13.58835296Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:13.588347604Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:13.588337365Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:13.588279961Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.588400229Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.588326808Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:13.588196093Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.588127618Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:13.588159779Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-022" t=2024-05-29T13:44:13.588066022Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-022" t=2024-05-29T13:44:13.588051282Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=af-gateway, instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=af-gateway-74ddc7c66b-54cpj, uid=cd91f574-6074-4bcd-93eb-c0ea5a54ce14" t=2024-05-29T13:44:13.587986292Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=555280 slug=hipcreative instance="__name__=kube_pod_container_status_restarts_total, cluster=gke-hip-af, container=af-gateway, 
instance=grafana-k8s-monitoring-kube-state-metrics.grafana-cloud.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=af, pod=af-gateway-74ddc7c66b-54cpj, uid=cd91f574-6074-4bcd-93eb-c0ea5a54ce14" t=2024-05-29T13:44:13.587971756Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.587873817Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.587847843Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-021" t=2024-05-29T13:44:13.587875083Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.587703943Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-020" t=2024-05-29T13:44:13.587737326Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.587603172Z caller=remote_instance_store.go:51 user=201790 slug=veedmo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.587466188Z caller=remote_instance_store.go:51 user=796993 slug=marketops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.587453017Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-017" t=2024-05-29T13:44:13.587370953Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=142180 slug=luxtronic t=2024-05-29T13:44:13.587336241Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=38.698708ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-016" t=2024-05-29T13:44:13.587247191Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=306551 slug=teckresourcesalerts t=2024-05-29T13:44:13.587301808Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="engineType=Cummins QSK78 Tier 4, equipmentName=HT957, site=FRO" t=2024-05-29T13:44:13.587250236Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.587064888Z caller=remote_instance_store.go:51 user=355429 slug=zenpli msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.586907442Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=355429 slug=zenpli instance="Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.int.data-point-insights-calculated.0" t=2024-05-29T13:44:13.586994438Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=355429 slug=zenpli instance="Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-orchestration-planned.0" t=2024-05-29T13:44:13.58697643Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=355429 slug=zenpli instance="Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-insights-aggregated.0" t=2024-05-29T13:44:13.586945382Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="engineType=Cummins QSK78 Tier 4, equipmentName=HT407, site=GHO" t=2024-05-29T13:44:13.586950274Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=355429 slug=zenpli instance="Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.data-enrichment.fct.data-enrichment-response-sent.0" t=2024-05-29T13:44:13.586793797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=355429 slug=zenpli instance="Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.data-enrichment.fct.data-enrichment-response-sent.0" t=2024-05-29T13:44:13.586786687Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=355429 slug=zenpli instance="Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-started.0" t=2024-05-29T13:44:13.586762803Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=355429 slug=zenpli instance="Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-succeeded.0" t=2024-05-29T13:44:13.58674344Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="engineType=Cummins QSK78 Tier 4, equipmentName=HT406, site=GHO" t=2024-05-29T13:44:13.586857091Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.586473977Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="engineType=Cummins QSK78 Tier 4, equipmentName=HT405, site=GHO" t=2024-05-29T13:44:13.586763359Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.586467194Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=355429 slug=zenpli version=8 fingerprint=069b70e9b627092e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.586421067Z level=debug msg="Alert rule evaluated" results="[{Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-failed.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-failed.0 Value:0xc0253d7120} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-failed.0 Value:0xc0253d7158}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.585565303s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-failed.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-failed.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-succeeded.0 State:Normal Error: 
Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-succeeded.0 Value:0xc0253d71e8} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-succeeded.0 Value:0xc0253d7220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.58558734s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-succeeded.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-notification-succeeded.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-started.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-started.0 Value:0xc0253d72e8} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-started.0 Value:0xc0253d72b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.585594467s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-started.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.customer-management.fct.onboarding-decision-started.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.data-enrichment.fct.data-enrichment-response-sent.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.data-enrichment.fct.data-enrichment-response-sent.0 Value:0xc0253d7348} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.data-enrichment.fct.data-enrichment-response-sent.0 Value:0xc0253d73b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.58560511s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.data-enrichment.fct.data-enrichment-response-sent.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.data-enrichment.fct.data-enrichment-response-sent.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.puzzle-planner.fct.identity-puzzle-solved.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.puzzle-planner.fct.identity-puzzle-solved.0 Value:0xc0253d7420} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.puzzle-planner.fct.identity-puzzle-solved.0 Value:0xc0253d7478}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.58561112s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.puzzle-planner.fct.identity-puzzle-solved.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.puzzle-planner.fct.identity-puzzle-solved.0} value=0 ]} 
{Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.onboarding-decision-taken.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.onboarding-decision-taken.0 Value:0xc0253d74e8} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.onboarding-decision-taken.0 Value:0xc0253d7520}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.58562039s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.onboarding-decision-taken.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.onboarding-decision-taken.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.risk-assessment-finished.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.risk-assessment-finished.0 Value:0xc0253d75e0} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.risk-assessment-finished.0 Value:0xc0253d7618}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.585627033s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.risk-assessment-finished.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.risk-assessment-finished.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.users-attributes-submitted.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.users-attributes-submitted.0 Value:0xc0253d76d0} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.users-attributes-submitted.0 Value:0xc0253d7698}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.585633191s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.users-attributes-submitted.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.underwriting.fct.users-attributes-submitted.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-insights-aggregated.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-insights-aggregated.0 Value:0xc0253d7750} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-insights-aggregated.0 Value:0xc0253d7888}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.585638811s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-insights-aggregated.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, 
Topic=stag.mx.vendor-gateway.fct.data-point-insights-aggregated.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-orchestration-planned.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-orchestration-planned.0 Value:0xc0253d7940} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-orchestration-planned.0 Value:0xc0253d7908}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.585644922s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-orchestration-planned.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.fct.data-point-orchestration-planned.0} value=0 ]} {Instance:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.int.data-point-insights-calculated.0 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.int.data-point-insights-calculated.0 Value:0xc0253d7a18} C:{Var:C Labels:Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.int.data-point-insights-calculated.0 Value:0xc0253d79e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.585675853s EvaluationString:[ var='B' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.int.data-point-insights-calculated.0} value=0 ], [ var='C' labels={Cluster Name=zenpli, Consumer Group=zenpli-database-consumer, Topic=stag.mx.vendor-gateway.int.data-point-insights-calculated.0} value=0 ]}]" duration=224.610549ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-013" t=2024-05-29T13:44:13.586727567Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-013" t=2024-05-29T13:44:13.586694217Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.58645608Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.586573494Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.586427587Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="engineType=Cummins QSK78 Tier 4, equipmentName=HT401, site=GHO" t=2024-05-29T13:44:13.586488193Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.586426115Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.586281211Z caller=remote_instance_store.go:51 user=102207 slug=recogizergroup msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="engineType=Cummins QSK78 MCRS, equipmentName=HT952, site=FRO" t=2024-05-29T13:44:13.586384647Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.586265397Z 
caller=remote_instance_store.go:51 user=456850 slug=juniz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.586256589Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.586186518Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-011" t=2024-05-29T13:44:13.586118475Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.585984059Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.585872579Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-010" t=2024-05-29T13:44:13.585915454Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.585811117Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-009" t=2024-05-29T13:44:13.585809908Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.585708918Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.585600719Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.585522613Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-006" t=2024-05-29T13:44:13.58534238Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.585288967Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=49546 slug=nulogyinfra t=2024-05-29T13:44:13.585190519Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.228825ms + level=debug ts=2024-05-29T13:44:13.584785358Z caller=remote_instance_store.go:51 user=20177 slug=paddledash msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=20177 slug=paddledash t=2024-05-29T13:44:13.584730345Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:13.58472301Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.584537102Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=237629 slug=ocrolus version=15 fingerprint=262831f0f16a9268 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.584594705Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.584311488s EvaluationString:}]" duration=75.48322ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-lax-usl5-002" t=2024-05-29T13:44:13.584594998Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 
slug=expressvpn instance="host=gige-lax-usl5-002" t=2024-05-29T13:44:13.58458326Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.584356662Z caller=remote_instance_store.go:51 user=172772 slug=ppbtradingtribe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.584284323Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.584239308Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.584226682Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=ecaa90d13580ed00 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.584068339Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.5837862s EvaluationString:}]" duration=285.831127ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-79" t=2024-05-29T13:44:13.584053337Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.583864142Z caller=remote_instance_store.go:51 user=84360 slug=sib msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.583766512Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-78" t=2024-05-29T13:44:13.583742975Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-77" t=2024-05-29T13:44:13.583623066Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-75" t=2024-05-29T13:44:13.583340573Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-74" t=2024-05-29T13:44:13.583236537Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="carrier=verizon_catm1, cluster_label=devctl, env=prd, team=siren" t=2024-05-29T13:44:13.583159653Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.582933012Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.269782ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-62" t=2024-05-29T13:44:13.582806769Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-60" t=2024-05-29T13:44:13.582620333Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.582599759Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=621635 slug=isitonfire t=2024-05-29T13:44:13.582232982Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-18" t=2024-05-29T13:44:13.582344046Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.582272843Z 
caller=remote_instance_store.go:51 user=638425 slug=docktech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-17" t=2024-05-29T13:44:13.582232733Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:13.582102582Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=42.049437ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-169" t=2024-05-29T13:44:13.582120597Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.582022102Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.581684035Z caller=remote_alert_sender.go:94 user=642786 slug=sophoscomnsg host=sophoscomnsg-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.13.67:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edd557df-ab37-4da0-a35d-4761e1beacc5 alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-168" t=2024-05-29T13:44:13.581981953Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-165" t=2024-05-29T13:44:13.581602173Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.58150722Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-162" t=2024-05-29T13:44:13.581207601Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=lQBzRG7Vz, ref_id=B" t=2024-05-29T13:44:13.581138296Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.581020895Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=792645 slug=hubx instance="datasource_uid=a1f2bd27-ff00-4110-883f-3a13db4ec694, ref_id=A" t=2024-05-29T13:44:13.580994091Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.580876485Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.58087221Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=792645 slug=hubx version=5 fingerprint=c71d5364bea398a9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.580672666Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a1f2bd27-ff00-4110-883f-3a13db4ec694, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.5802321s EvaluationString:}]" duration=220.295966ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-159" t=2024-05-29T13:44:13.580688052Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.580635973Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.580522384Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.580585405Z 
caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance= t=2024-05-29T13:44:13.580496222Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:13.58042571Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.58035797Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=355343 slug=foxbase t=2024-05-29T13:44:13.580363654Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.937456ms + level=debug ts=2024-05-29T13:44:13.58028101Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-156" t=2024-05-29T13:44:13.580223704Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.580003598Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.579969379Z caller=remote_alert_sender.go:94 user=70430 slug=dapperlabs host=dapperlabs-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.144.208.13:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=f7ae58f8-cdd9-4de9-b9a2-2c173c8b6f4a alerts=1 + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.579859872Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.042178ms + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:13.579809218Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=32.32707ms + level=debug ts=2024-05-29T13:44:13.579859429Z caller=remote_instance_store.go:51 user=907836 slug=dfdsplatformservices msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=115997 slug=potlatchdeltic t=2024-05-29T13:44:13.579662123Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=28.306767ms + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.579242273Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.545144ms + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.579516372Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=59.662369ms + level=debug ts=2024-05-29T13:44:13.579410722Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-151" t=2024-05-29T13:44:13.579451305Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.579408952Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.579311865Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.579237838Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=845543 slug=deliveryhero t=2024-05-29T13:44:13.579165702Z level=debug msg="State manager processing evaluation results" 
resultCount=1 + logger=ngalert.state.manager.persist user=60603 slug=avalaratax t=2024-05-29T13:44:13.579044Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=61.226273ms + level=debug ts=2024-05-29T13:44:13.579019327Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-147" t=2024-05-29T13:44:13.5787976Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.578727702Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.578516023Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=18335 slug=semaphore t=2024-05-29T13:44:13.578480762Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-145" t=2024-05-29T13:44:13.578493074Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=18335 slug=semaphore version=1 fingerprint=411ec7bc3ee8aaf9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.57839213Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000005, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.578013581s EvaluationString:}]" duration=32.40159ms + level=debug ts=2024-05-29T13:44:13.57837072Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-144" t=2024-05-29T13:44:13.578330448Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.577941883Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-143" t=2024-05-29T13:44:13.578182877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-143" t=2024-05-29T13:44:13.578145729Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.578064504Z caller=remote_instance_store.go:51 user=795224 slug=gannettdigital msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=338059 slug=ninetailed t=2024-05-29T13:44:13.577805051Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=47.848034ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-140" t=2024-05-29T13:44:13.577617223Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.577472204Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.577364414Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.577245442Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.577017009Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.576866243Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo 
msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.5767013Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.576798671Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.576859744Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.5766869Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-135" t=2024-05-29T13:44:13.576739199Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.576715744Z caller=remote_alert_sender.go:94 user=461036 slug=cgpgrafana host=cgpgrafana-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.35.168:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=jtvX7ydVz alerts=1 + level=debug ts=2024-05-29T13:44:13.576581353Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-135" t=2024-05-29T13:44:13.57662503Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-134" t=2024-05-29T13:44:13.576519579Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.576501078Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-134" t=2024-05-29T13:44:13.576502749Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.576346118Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.576309525Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=692010 slug=mercariusprod t=2024-05-29T13:44:13.576242843Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.674513ms + level=debug ts=2024-05-29T13:44:13.576071703Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-131" t=2024-05-29T13:44:13.576106698Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.576019448Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.575963698Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-130" t=2024-05-29T13:44:13.575987637Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.575931157Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.57590906Z caller=remote_instance_store.go:51 user=63636 slug=streamelements msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=736975 slug=jetcomms instance="device=/dev/disk/by-uuid/27884492-ce4d-48a0-bd04-c87480a9663f, fstype=ext4, 
instance=10.105.127.7:9100, job=node, mountpoint=/" t=2024-05-29T13:44:13.575849322Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=736975 slug=jetcomms instance="device=/dev/disk/by-uuid/27884492-ce4d-48a0-bd04-c87480a9663f, fstype=ext4, instance=10.105.127.7:9100, job=node, mountpoint=/" t=2024-05-29T13:44:13.575841892Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=63636 slug=streamelements version=3 fingerprint=10c9e607f7ba2715 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.575723448Z level=debug msg="Alert rule evaluated" results="[{Instance:consumer_name=creators-data-migration-consumer, stream_name=creators-data-migration State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:consumer_name=creators-data-migration-consumer, stream_name=creators-data-migration Value:0xc0253d6cb8} B:{Var:B Labels:consumer_name=creators-data-migration-consumer, stream_name=creators-data-migration Value:0xc0253d6c68} C:{Var:C Labels:consumer_name=creators-data-migration-consumer, stream_name=creators-data-migration Value:0xc0253d6cb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.575411458s EvaluationString:[ var='A' labels={consumer_name=creators-data-migration-consumer, stream_name=creators-data-migration} value=0 ], [ var='B' labels={consumer_name=creators-data-migration-consumer, stream_name=creators-data-migration} value=0 ], [ var='C' labels={consumer_name=creators-data-migration-consumer, stream_name=creators-data-migration} value=0 ]}]" duration=107.373349ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-129" t=2024-05-29T13:44:13.575829792Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.575733111Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=736975 slug=jetcomms t=2024-05-29T13:44:13.575666188Z level=debug msg="State manager processing evaluation results" resultCount=6 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-128" t=2024-05-29T13:44:13.575693912Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.575685143Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=426229 slug=accelbyte version=29 fingerprint=404c609a904be332 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.575540201Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.575300667s EvaluationString:}]" duration=312.849626ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-127" t=2024-05-29T13:44:13.575555581Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-126" t=2024-05-29T13:44:13.575429174Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-126" t=2024-05-29T13:44:13.575388544Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.575336007Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=22115 slug=tiki instance= 
t=2024-05-29T13:44:13.575146862Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError + logger=ngalert.state.manager user=22115 slug=tiki instance= t=2024-05-29T13:44:13.575134792Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:13.575060679Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.221271ms + logger=ngalert.state.manager user=55491 slug=demandbase instance="Cluster Name=db1-cdc, Consumer Group=account, Topic=sr_account_cdc" t=2024-05-29T13:44:13.575023744Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=55491 slug=demandbase instance="Cluster Name=db1-cdc, Consumer Group=account, Topic=sr_account_cdc" t=2024-05-29T13:44:13.57500974Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=437245 slug=kosayuspun t=2024-05-29T13:44:13.574939641Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=437245 slug=kosayuspun instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.574922583Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=437245 slug=kosayuspun instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.574915497Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=437245 slug=kosayuspun instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.574898347Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-123" t=2024-05-29T13:44:13.574882697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=437245 slug=kosayuspun version=20 fingerprint=e8b25778bb4fce21 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.574799913Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.574553211s EvaluationString:}]" duration=204.32282ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-122" t=2024-05-29T13:44:13.574754246Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=475170 slug=paypaplane t=2024-05-29T13:44:13.574588972Z level=debug msg="Saving alert states done" count=162 max_state_save_concurrency=1 duration=1.81888107s + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-121" t=2024-05-29T13:44:13.574611993Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.574561627Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=27998 slug=korob t=2024-05-29T13:44:13.574028728Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=157.941904ms + level=debug ts=2024-05-29T13:44:13.57390988Z caller=remote_instance_store.go:51 user=333241 slug=croesusdev msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.57385386Z caller=remote_image_capturer.go:61 user=22398 slug=sunfolding 
rule_org_id=1 rule_uid=a2f96966-6c03-43f8-871f-1aec1ed3a708 dashboard=e7T7NQdZk panel=69 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.573680909Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=333241 slug=croesusdev instance="datasource_uid=grafanacloud-prom, ref_id=A,B" t=2024-05-29T13:44:13.573621788Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.573594103Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=109452 slug=deltarisk t=2024-05-29T13:44:13.573554978Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-115" t=2024-05-29T13:44:13.57332417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-114" t=2024-05-29T13:44:13.573212728Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-114" t=2024-05-29T13:44:13.573200262Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-113" t=2024-05-29T13:44:13.57308444Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=532409 slug=jnjdev instance= t=2024-05-29T13:44:13.572870096Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.572874648Z caller=remote_image_capturer.go:54 user=22398 slug=sunfolding rule_org_id=1 rule_uid=a2f96966-6c03-43f8-871f-1aec1ed3a708 dashboard=e7T7NQdZk panel=69 msg="rendering alert image with grafana" + level=debug ts=2024-05-29T13:44:13.572746892Z caller=remote_instance_store.go:51 user=796993 slug=marketops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:13.57278562Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:13.572774947Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=22398 slug=sunfolding instance="datasource_uid=grafanacloud-sunfolding, ref_id=B" t=2024-05-29T13:44:13.572742413Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.57262096Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-11" t=2024-05-29T13:44:13.572612278Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.572550714Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:13.572580635Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.572124307Z 
caller=remote_instance_store.go:51 user=456850 slug=juniz msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.572438346Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=127813 slug=clearsale t=2024-05-29T13:44:13.572433333Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=0c00b09a77d3607f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.572409724Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.572182369s EvaluationString:}]" duration=141.029639ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-109" t=2024-05-29T13:44:13.572479389Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-109" t=2024-05-29T13:44:13.572412245Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-108" t=2024-05-29T13:44:13.572279907Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=proxy, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=kong-api, pod=juno-prod-nethgate-dp-kong-747d58d76c-jxjz6, service=juno-prod-prometheus-kube-state-metrics, uid=fab06ed0-87f5-4245-84bc-e80296053d2d" t=2024-05-29T13:44:13.572255312Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.57216558Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.572110988Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=88.237429ms + logger=ngalert.state.manager user=456850 slug=juniz instance="Langhaus_ID=2, TE_ID=4" t=2024-05-29T13:44:13.571995979Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-107" t=2024-05-29T13:44:13.572131532Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=dc13e05e852953cc attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.572037668Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.571742152s EvaluationString:}]" duration=228.895084ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-106" t=2024-05-29T13:44:13.572014165Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=proxy, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=kong-api, pod=juno-prod-nethgate-cp-kong-8df9b4979-n7wrv, service=juno-prod-prometheus-kube-state-metrics, uid=335adc7a-2113-4902-a9cb-abe38ea2afd4" t=2024-05-29T13:44:13.572040518Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.572007738Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=456850 slug=juniz instance="Langhaus_ID=1, TE_ID=1" 
t=2024-05-29T13:44:13.571876505Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-105" t=2024-05-29T13:44:13.571875271Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.571794918Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-104" t=2024-05-29T13:44:13.571731499Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=916149 slug=cmfollpd t=2024-05-29T13:44:13.571639878Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=245291 slug=pismo version=30 fingerprint=0811cb73cee0c48e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.571590507Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.571311069s EvaluationString:}]" duration=451.551497ms + level=debug ts=2024-05-29T13:44:13.571605428Z caller=remote_instance_store.go:51 user=638425 slug=docktech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.571614094Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=245291 slug=pismo version=5 fingerprint=eb3d547108bc2811 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.571543381Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[Alert Conditions0:{Var:Alert Conditions Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.571241527s EvaluationString:[ var='Alert Conditions0' metric='NoData' labels={} value=null ]}]" duration=252.654114ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-103" t=2024-05-29T13:44:13.571466999Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=916149 slug=cmfollpd t=2024-05-29T13:44:13.571474137Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.571458916Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=916149 slug=cmfollpd version=1 fingerprint=77b2b2baf0fb9a4a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.571395286Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=puusea4afollwxsapp1003.foll.gcp.hclsw.internal State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=puusea4afollwxsapp1003.foll.gcp.hclsw.internal Value:0xc038a43470} B:{Var:B Labels:instance=puusea4afollwxsapp1003.foll.gcp.hclsw.internal Value:0xc038a43480} C:{Var:C Labels:instance=puusea4afollwxsapp1003.foll.gcp.hclsw.internal Value:0xc038a43490}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.570853233s EvaluationString:[ var='A' labels={instance=puusea4afollwxsapp1003.foll.gcp.hclsw.internal} value=18.209425681940814 ], [ var='B' labels={instance=puusea4afollwxsapp1003.foll.gcp.hclsw.internal} value=18.209425681940814 ], [ var='C' labels={instance=puusea4afollwxsapp1003.foll.gcp.hclsw.internal} value=0 ]}]" duration=14.678032ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-102" t=2024-05-29T13:44:13.571345523Z level=debug msg="Keeping 
state" state=Normal + level=debug ts=2024-05-29T13:44:13.571321967Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gige-chi-101" t=2024-05-29T13:44:13.57122718Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.571169412Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=promtail, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=promtail-rllmf, service=juno-prod-prometheus-kube-state-metrics, uid=2c21a234-e4cc-473a-98fd-7747d628b5a7" t=2024-05-29T13:44:13.571071719Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.570958625Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.361729ms + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=promtail, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=promtail-mnx4r, service=juno-prod-prometheus-kube-state-metrics, uid=628f1aa6-13e9-4713-91f1-e79a5cc3f5d8" t=2024-05-29T13:44:13.570816922Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.570726323Z caller=remote_instance_store.go:51 user=700783 slug=gsgmedia msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=promtail, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=promtail-kgrb5, service=juno-prod-prometheus-kube-state-metrics, uid=8188f23d-1c58-4206-b7e9-f0ac4324617a" t=2024-05-29T13:44:13.570682149Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gaming-dp-fra-de-003" t=2024-05-29T13:44:13.570680092Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=gaming-dp-fra-de-002" t=2024-05-29T13:44:13.570540335Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=248141 slug=freemancloud t=2024-05-29T13:44:13.570559258Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.570458812Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=248141 slug=freemancloud version=9 fingerprint=5edf1e3971e0caa3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.570438111Z level=debug msg="Alert rule evaluated" results="[{Instance:app=fuzionv1, geo=westus State:Normal Error: Results:map[] Values:map[C:{Var:C Labels:app=fuzionv1, geo=westus Value:0xc021d092c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.570099972s EvaluationString:[ var='C' labels={app=fuzionv1, geo=westus} value=0 ]}]" duration=54.422638ms + level=debug ts=2024-05-29T13:44:13.569993264Z caller=remote_instance_store.go:51 user=240319 slug=languagewire msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=240319 slug=languagewire version=33 fingerprint=31fa4776fe73ef5d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.569761201Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] 
Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.569426037s EvaluationString:}]" duration=26.693272ms + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=promtail, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=promtail-8xrmq, service=juno-prod-prometheus-kube-state-metrics, uid=f4c150f5-0304-4931-8cea-99bb8807d53e" t=2024-05-29T13:44:13.57031459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=estnoc-lux-lu-003" t=2024-05-29T13:44:13.570217601Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=promtail, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=promtail-2xbnd, service=juno-prod-prometheus-kube-state-metrics, uid=6ffd1389-9264-4240-8c9d-410182a0e224" t=2024-05-29T13:44:13.570160841Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=promtail, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=promtail-2n479, service=juno-prod-prometheus-kube-state-metrics, uid=9a873859-c405-474f-9e53-aa0c5e7f911a" t=2024-05-29T13:44:13.570036785Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.569897207Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=440359 slug=twistatus t=2024-05-29T13:44:13.569922875Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=prometheus, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=prometheus-juno-prod-prometheus-kube-prometheus-0, service=juno-prod-prometheus-kube-state-metrics, uid=7f1a40b4-6f68-49fc-a6e4-03106cb0b15c" t=2024-05-29T13:44:13.569921145Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=696798 slug=mcv instance="__name__=wrc_CCU, env=prd_cm, instance=10.64.10.135:9000, job=wrc_cm_ccu, platform=ORIGIN" t=2024-05-29T13:44:13.569803994Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=146728 slug=dgc instance= t=2024-05-29T13:44:13.569791014Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.569542892Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:13.569492552Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=prometheus, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=gmp-system, pod=collector-tz8vp, service=juno-prod-prometheus-kube-state-metrics, uid=834ec75f-181e-4adb-832c-1a55eab2814e" t=2024-05-29T13:44:13.569445987Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=estnoc-kef-is-001" t=2024-05-29T13:44:13.569418388Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=315028 
slug=greenchoice instance= t=2024-05-29T13:44:13.569363275Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.569285209Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=estnoc-gr-03" t=2024-05-29T13:44:13.569302342Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=prometheus, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=gmp-system, pod=collector-nzspb, service=juno-prod-prometheus-kube-state-metrics, uid=de0d256c-f59c-44dc-9cea-bee21fa81e40" t=2024-05-29T13:44:13.569113135Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=estnoc-gr-01" t=2024-05-29T13:44:13.569059847Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001" t=2024-05-29T13:44:13.569031592Z level=debug msg="Keeping state" state=Normal + Error parsing panelUID for alert annotation ruleID=1628 dash= actual= error=strconv.ParseInt: parsing "": invalid syntax + logger=ngalert.scheduler user=432323 slug=lithic version=10 fingerprint=75eb8098fa40f860 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.568880308Z level=debug msg="Alert rule evaluated" results="[{Instance:CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001 Value:0xc021d08c80} C:{Var:C Labels:CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001 Value:0xc021d08c88}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.568487775s EvaluationString:[ var='B' labels={CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001} value=4.739072e+06 ], [ var='C' labels={CacheClusterId=asapusw1-staging-sandbox-cluster-edd6886-001} value=0 ]}]" duration=54.731228ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=estnoc-ee-02" t=2024-05-29T13:44:13.568946152Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.56859566Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=642786 slug=sophoscomnsg instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.568290445Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.568348576Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-zrh-ch-003" t=2024-05-29T13:44:13.568443803Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=355343 slug=foxbase t=2024-05-29T13:44:13.568418796Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=355343 slug=foxbase instance="DBInstanceIdentifier=prod" t=2024-05-29T13:44:13.568404376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=prometheus, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=gmp-system,
pod=collector-595s7, service=juno-prod-prometheus-kube-state-metrics, uid=fd10eed6-a07c-4c7b-934c-cdeb1c727f37" t=2024-05-29T13:44:13.568361529Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.568294044Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=642786 slug=sophoscomnsg version=62 fingerprint=9f57aaa7bafa5305 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.568158233Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.567679581s EvaluationString:}]" duration=12.69738ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-zrh-ch-002" t=2024-05-29T13:44:13.568322536Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-zrh-ch-001" t=2024-05-29T13:44:13.568240007Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.568090963Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:13.568111267Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.567959786Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.5680072Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=loki, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=monitoring, pod=loki-0, service=juno-prod-prometheus-kube-state-metrics, uid=4f42ecda-4c5c-49a1-92fd-097e0ebc752f" t=2024-05-29T13:44:13.567945835Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.567935986Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.567892218Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=46ef578ce0554567 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.567778899Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.567536513s EvaluationString:}]" duration=382.890576ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-yyz-ca-010" t=2024-05-29T13:44:13.567743686Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.567667545Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=cert-manager-webhook, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-webhook-5f58d6bf86-jvx4h, service=juno-prod-prometheus-kube-state-metrics, uid=6d013a6c-0786-4eb1-803a-51ca9473107a" t=2024-05-29T13:44:13.567667302Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=751407 slug=nethermindjuno instance="container=cert-manager-controller, endpoint=http, instance=192.168.198.20:8080, job=kube-state-metrics, namespace=cert-manager, pod=cert-manager-8694c7d4fd-pvg5d, service=juno-prod-prometheus-kube-state-metrics, uid=aff7f782-3ace-430f-940a-769d08b41374" t=2024-05-29T13:44:13.567514892Z level=debug msg="Setting next state" handler=resultNormal
[A long run of further `+` lines of raw logfmt output from Grafana's ngalert alerting stack — apparently sample/test-fixture data, all timestamped 2024-05-29T13:44:13: ngalert.state.manager state transitions ("Setting next state" / "Keeping state" state=Normal), ngalert.state.manager.persist saves ("Saving alert states"), ngalert.scheduler rule evaluations ("Alert rule evaluated" with per-label Results/Values dumps), remote_instance_store.go "calling SaveAlertInstance" calls, remote_alert_sender.go "sending alerts to grafana" sends, ruler.go tenant-ownership checks, and client.go discovery messages ("creating client for grafana instance"), spanning many tenants, plus one malformed "Error parsing panelUID for alert annotation" entry. Each log entry occupies its own `+` line in the source diff.]
+ logger=ngalert.state.manager user=320906 slug=techcyte
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.549414921Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mxp-it-055" t=2024-05-29T13:44:13.549144859Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.548964027Z caller=ruler.go:522 msg="tenant is owned by this instance" user=461798 slug=betfair groups=8 + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:13.548794608Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:13.548788808Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=warn ts=2024-05-29T13:44:13.548824242Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=389170 slug=creativity + level=warn ts=2024-05-29T13:44:13.548793774Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=487423 slug=deam + level=debug ts=2024-05-29T13:44:13.548786156Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543604 slug=kingmakers instance="datasource_uid=fecf3b69-94b4-4c95-9a93-63d7e7966e74, ref_id=input" t=2024-05-29T13:44:13.548764407Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:13.548553668Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.548438092Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.661852ms + logger=ngalert.state.manager user=142180 slug=luxtronic instance= t=2024-05-29T13:44:13.548620702Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=142180 slug=luxtronic instance= t=2024-05-29T13:44:13.548613078Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.548442804Z caller=ruler.go:522 msg="tenant is owned by this instance" user=389170 slug=creativity groups=0 + logger=ngalert.state.manager.persist user=310637 slug=notino t=2024-05-29T13:44:13.548415316Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.87361ms + level=debug ts=2024-05-29T13:44:13.548385961Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.548386113Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.548328605Z caller=remote_instance_store.go:51 user=438761 slug=wasabicloudprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.548296545Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.547877985Z caller=client.go:80 msg="creating client for grafana instance" user=331366 addr=dns:///fiberhome-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.scheduler user=438761 slug=wasabicloudprod version=3 
fingerprint=3751e65e3c57fbab attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.548055489Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.547687361s EvaluationString:}]" duration=124.522444ms + level=debug ts=2024-05-29T13:44:13.548001408Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.547933963Z caller=client.go:80 msg="creating client for grafana instance" user=659368 addr=dns:///fidelio-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:13.547824828Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:13.547810858Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:13.547683524Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.39988ms + level=debug ts=2024-05-29T13:44:13.547675113Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.547431635Z caller=remote_instance_store.go:51 user=646202 slug=kairosaerospace msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:13.547443234Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=127813 slug=clearsale instance= t=2024-05-29T13:44:13.547423298Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.547391076Z caller=ruler.go:522 msg="tenant is owned by this instance" user=705776 slug=datacomcloudservices groups=2 + level=info component=discovery ts=2024-05-29T13:44:13.547222543Z caller=client.go:80 msg="creating client for grafana instance" user=334836 addr=dns:///fakihsoumi-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.547188798Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=535960 slug=cathodechurch + level=debug ts=2024-05-29T13:44:13.547225543Z caller=remote_instance_store.go:51 user=466402 slug=apexfsnzprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.547154809Z caller=ruler.go:522 msg="tenant is owned by this instance" user=535960 slug=cathodechurch groups=0 + logger=ngalert.state.manager user=466402 slug=apexfsnzprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.547160203Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=466402 slug=apexfsnzprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.547137164Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.54708883Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=44.651411ms + logger=ngalert.state.manager.persist user=656158 slug=muonspacegroundprod t=2024-05-29T13:44:13.547053971Z 
level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=uptime" previous_handler=resultNoData t=2024-05-29T13:44:13.54704336Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=656158 slug=muonspacegroundprod instance="datasource_uid=a27bb067-67c3-4636-aa16-ed387b9bc21e, ref_id=uptime" previous_handler=resultNoData t=2024-05-29T13:44:13.54703848Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.547030917Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mrs-fr-003" t=2024-05-29T13:44:13.54677921Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.546846135Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.546831722Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=466402 slug=apexfsnzprod t=2024-05-29T13:44:13.546864811Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=145127 slug=detooperp t=2024-05-29T13:44:13.54683468Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=69.187391ms + logger=ngalert.scheduler user=466402 slug=apexfsnzprod version=1 fingerprint=fc3beb3a33ad3cfa attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.546783861Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.546507706s EvaluationString:}]" duration=12.438507ms + level=debug ts=2024-05-29T13:44:13.546749667Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.546705271Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.333786ms + level=debug ts=2024-05-29T13:44:13.546644944Z caller=remote_instance_store.go:51 user=159918 slug=mibus32 msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.546559725Z caller=remote_image_capturer.go:61 user=159918 slug=mibus32 rule_org_id=1 rule_uid=VpLuOod7z dashboard=R-VTFNbGz panel=6 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=159918 slug=mibus32 instance= t=2024-05-29T13:44:13.546587487Z level=warn msg="Failed to take an image" dashboard=R-VTFNbGz panel=6 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.546347607Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mrs-fr-002" t=2024-05-29T13:44:13.546519497Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.546355758Z caller=remote_instance_store.go:51 user=455282 slug=rockwool msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mrs-fr-001" 
t=2024-05-29T13:44:13.546364415Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.546080779Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.545860347Z caller=client.go:80 msg="creating client for grafana instance" user=330999 addr=dns:///evansopc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-016" t=2024-05-29T13:44:13.545930052Z level=debug msg="Keeping state" state=Normal + level=warn ts=2024-05-29T13:44:13.545829608Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=330459 slug=dgoodlad + level=info component=discovery ts=2024-05-29T13:44:13.54574682Z caller=client.go:80 msg="creating client for grafana instance" user=364718 addr=dns:///ericm-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.545763683Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.545470151Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-014" t=2024-05-29T13:44:13.545515011Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=9F7TNpxVk, ref_id=A" t=2024-05-29T13:44:13.545434688Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-013" t=2024-05-29T13:44:13.545368736Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-012" t=2024-05-29T13:44:13.545204219Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-011" t=2024-05-29T13:44:13.545093443Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.544994578Z caller=remote_instance_store.go:51 user=299574 slug=caidev msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.544990348Z caller=remote_alert_sender.go:94 user=334408 slug=voltagrid host=voltagrid-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.194.196:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=YHueUVaVk alerts=1 + level=debug ts=2024-05-29T13:44:13.544874234Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=299574 slug=caidev version=5 fingerprint=e59c91ad618efb52 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.544829275Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fXpT1b14k, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.54444844s EvaluationString:}]" duration=52.084698ms + level=debug ts=2024-05-29T13:44:13.544834991Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.544852364Z caller=remote_alert_sender.go:94 user=334408 slug=voltagrid host=voltagrid-grafana-http.hosted-grafana.svc.cluster.local.:10000 
addr=10.145.62.7:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=YHueUVaVk alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-009" t=2024-05-29T13:44:13.544812526Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.544745986Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-008" t=2024-05-29T13:44:13.544689409Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.544669598Z caller=remote_alert_sender.go:94 user=334408 slug=voltagrid host=voltagrid-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.22.9:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=YHueUVaVk alerts=1 + level=warn ts=2024-05-29T13:44:13.54462432Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=534244 slug=anaplanprodau + level=debug ts=2024-05-29T13:44:13.544597124Z caller=ruler.go:522 msg="tenant is owned by this instance" user=534244 slug=anaplanprodau groups=0 + level=debug ts=2024-05-29T13:44:13.544579391Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.544491257Z caller=remote_alert_sender.go:94 user=334408 slug=voltagrid host=voltagrid-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.245.203:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=YHueUVaVk alerts=1 + level=debug ts=2024-05-29T13:44:13.54447882Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=27737 slug=edfmancapital version=4 fingerprint=41b9fee47dd015d6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.544320416Z level=debug msg="Alert rule evaluated" results="[{Instance:ConnectionId=dxcon-ffmxpihr, VirtualInterfaceId=dxvif-ffzm8ypf State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ConnectionId=dxcon-ffmxpihr, VirtualInterfaceId=dxvif-ffzm8ypf Value:0xc01d9e9af8} C:{Var:C Labels:ConnectionId=dxcon-ffmxpihr, VirtualInterfaceId=dxvif-ffzm8ypf Value:0xc01d9e9af0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.543960692s EvaluationString:[ var='B' labels={ConnectionId=dxcon-ffmxpihr, VirtualInterfaceId=dxvif-ffzm8ypf} value=6.650095944603244e+07 ], [ var='C' labels={ConnectionId=dxcon-ffmxpihr, VirtualInterfaceId=dxvif-ffzm8ypf} value=0 ]}]" duration=163.81371ms + level=debug ts=2024-05-29T13:44:13.544305674Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60603 slug=avalaratax instance= t=2024-05-29T13:44:13.54434624Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.544304752Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.544323529Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.544312291Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.544270779Z caller=ruler.go:522 msg="tenant is owned by this instance" user=701324 slug=devdw groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn 
instance="host=dp-mia-us-006" t=2024-05-29T13:44:13.544028209Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.543971139Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-005" t=2024-05-29T13:44:13.543919746Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-005" t=2024-05-29T13:44:13.543890663Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mia-us-004" t=2024-05-29T13:44:13.54374458Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=675095 slug=aideatestack t=2024-05-29T13:44:13.543545094Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.340413ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.543459791Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.02101ms + level=debug ts=2024-05-29T13:44:13.543316027Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.543273681Z caller=ruler.go:522 msg="tenant is owned by this instance" user=392320 slug=backyard groups=2 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mad-es-004" t=2024-05-29T13:44:13.543145074Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.541702825Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.542994628Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.542948623Z caller=remote_instance_store.go:51 user=338059 slug=ninetailed msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.541820782Z caller=client.go:80 msg="creating client for grafana instance" user=494010 addr=dns:///dtlan-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mad-es-002" t=2024-05-29T13:44:13.542806259Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.542595255Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-mad-es-001" t=2024-05-29T13:44:13.542662152Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=21051 slug=mojio t=2024-05-29T13:44:13.542599334Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.44884ms + level=debug ts=2024-05-29T13:44:13.542540863Z caller=ruler.go:522 msg="tenant is owned by this instance" user=303649 slug=cs1 groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-015" t=2024-05-29T13:44:13.542473668Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.542389603Z caller=remote_alert_sender.go:94 user=651430 slug=qawolf host=qawolf-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.121.164:10000 msg="sending alerts to 
grafana" rule_org_id=1 rule_uid=edkx7y7dfzg8wf alerts=1 + level=info ts=2024-05-29T13:44:13.542244572Z caller=remote_alert_sender.go:94 user=391359 slug=linklogistics host=linklogistics-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.49.117.106:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edlrdy89g61a8d alerts=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-013" t=2024-05-29T13:44:13.542152842Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.541981801Z caller=remote_instance_store.go:51 user=544997 slug=cloudbuilders msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.541905505Z caller=remote_image_capturer.go:61 user=544997 slug=cloudbuilders rule_org_id=1 rule_uid=PuNLUAbVz dashboard=b4_Rds74z panel=32 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=544997 slug=cloudbuilders instance="Policy=cis-s3-mfadelete-is-not-enabled, ResType=aws.s3" t=2024-05-29T13:44:13.541929411Z level=warn msg="Failed to take an image" dashboard=b4_Rds74z panel=32 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-012" t=2024-05-29T13:44:13.541958132Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.541891984Z caller=client.go:80 msg="creating client for grafana instance" user=305351 addr=dns:///dylspeaking-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.541802896Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.541804936Z caller=client.go:80 msg="creating client for grafana instance" user=368431 addr=dns:///dsiddharthc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.541788589Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=305394 slug=csenergy + level=debug ts=2024-05-29T13:44:13.541601243Z caller=ruler.go:522 msg="tenant is owned by this instance" user=305394 slug=csenergy groups=0 + level=debug ts=2024-05-29T13:44:13.541758029Z caller=ruler.go:522 msg="tenant is owned by this instance" user=346066 slug=dashboardpina groups=0 + level=warn ts=2024-05-29T13:44:13.541730105Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=656479 slug=cyaraaup + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.54141413Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541370849Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541276777Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541267868Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.541493552Z caller=remote_instance_store.go:51 user=783114 slug=cleardil msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541238777Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-009" t=2024-05-29T13:44:13.541436055Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541196086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541171095Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541131106Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541118514Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541063763Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-008" t=2024-05-29T13:44:13.541273193Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541047433Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.541016423Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540981252Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540967871Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-007" t=2024-05-29T13:44:13.541153721Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540891421Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.541050136Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.54087556Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.54087183Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540862339Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=544997 slug=cloudbuilders t=2024-05-29T13:44:13.541000654Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540857129Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540818829Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540813308Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.540917119Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540793308Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540788008Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540782398Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540778458Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540742227Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540724647Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540653135Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540649235Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.540758584Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-005" t=2024-05-29T13:44:13.540777197Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540623145Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.540711091Z caller=remote_instance_store.go:51 user=236496 slug=improbable msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540619045Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=3a218f8fa9a57fef attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.540681116Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=MILAN Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc07d1dde18} Threshold:{Var:Threshold Labels: Value:0xc07d1dde40} compare:{Var:compare Labels:aggregatedBy=sum, name=MILAN Query Value:0xc07d1dde80} sum:{Var:sum Labels:aggregatedBy=sum, name=MILAN Query Value:0xc07d1ddeb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.54020036s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={aggregatedBy=sum, name=MILAN Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=MILAN Query} value=0 ]}]" duration=56.934621ms + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540613435Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540605584Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540601724Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:13.54065117Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.540636636Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540569814Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.540692513Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.540627307Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.540615089Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540486432Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540482572Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540471872Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540453572Z level=debug msg="Setting next state" 
handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540443731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540437771Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540408811Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.54039499Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.54039019Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.54036288Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540342659Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.540401791Z caller=remote_alert_sender.go:94 user=442863 slug=numan host=numan-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.156.32.36:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=kfw5pitVz alerts=1 + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540336389Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540327949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540309209Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540281628Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.540269879Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.540244662Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.540251317Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540237387Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, 
ref_id=A" t=2024-05-29T13:44:13.540232237Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.540198972Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540207917Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lgw-uk-002" t=2024-05-29T13:44:13.540223372Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540198737Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540189066Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540130975Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540123375Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540069644Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.540014403Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539991133Z level=debug msg="Keeping state" state=Normal + level=error ts=2024-05-29T13:44:13.53987094Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'C': data source not found" + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=7d5e5aae32b62b32 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.539904569Z level=error msg="Failed to evaluate rule" error="failed to build query 'C': data source not found" duration=12.366245ms + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539955012Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539914141Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539894561Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-031" t=2024-05-29T13:44:13.539914829Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=260796 slug=expressvpn instance="host=dp-lax-us-031" t=2024-05-29T13:44:13.539857083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.53985742Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539803069Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539784849Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539777849Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539758518Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539753508Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539738648Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539708017Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:13.539647194Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539686777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:13.539641844Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539644425Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539638345Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=783114 slug=cleardil instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.539634305Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.539564711Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.539403245Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.scheduler 
user=245291 slug=pismo version=31 fingerprint=4b3b1c2db4e514f9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.539355049Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.539090375s EvaluationString:}]" duration=238.453594ms
+level=debug ts=2024-05-29T13:44:13.539360617Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.539359977Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.539318307Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-027" t=2024-05-29T13:44:13.539122647Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.53903839Z caller=remote_instance_store.go:51 user=183427 slug=kaiku msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=363785 slug=moonletmonitor instance= t=2024-05-29T13:44:13.539018028Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=183427 slug=kaiku t=2024-05-29T13:44:13.538944579Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-026" t=2024-05-29T13:44:13.539010891Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=183427 slug=kaiku version=1 fingerprint=47f3c8e6fc1be5a1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.5388531Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.538562298s EvaluationString:}]" duration=2.351830276s
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-025" t=2024-05-29T13:44:13.538878772Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-024" t=2024-05-29T13:44:13.538751111Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.538678137Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=304032 slug=clearbanc t=2024-05-29T13:44:13.538668363Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjhlnj03-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.538450868Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=DY6IOGSVz, ref_id=A" t=2024-05-29T13:44:13.538580511Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-023" t=2024-05-29T13:44:13.538614538Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjfhbr6h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.538377607Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=DTySajW4z, ref_id=A" t=2024-05-29T13:44:13.538351377Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-022" t=2024-05-29T13:44:13.538364584Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-022" t=2024-05-29T13:44:13.538337468Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.538235156Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjfhbr6h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.538252926Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjfhbr6h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.538218446Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjfhbr6h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.538095595Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.538007245Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-020" t=2024-05-29T13:44:13.538021362Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.537978644Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.537877591Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.53787293Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjf7to4b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.537820182Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.537855837Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-019" t=2024-05-29T13:44:13.537859604Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.537780356Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-018" t=2024-05-29T13:44:13.537738539Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.537679543Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjf0fixt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.53761876Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjf0fixt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.537580399Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=514500 slug=rever t=2024-05-29T13:44:13.537468185Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=37.688975ms
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-017" t=2024-05-29T13:44:13.537531452Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjf0fixt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.537470088Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.537395957Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=538037 slug=drivewealth version=103 fingerprint=9a597914bc65ddc4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.537373644Z level=debug msg="Alert rule evaluated" results="[{Instance:host=ny4ap-intel-01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=ny4ap-intel-01 Value:0xc053795d50} B:{Var:B Labels:host=ny4ap-intel-01 Value:0xc053795e00} C:{Var:C Labels:host=ny4ap-intel-01 Value:0xc053795e30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.536977418s EvaluationString:[ var='A' labels={host=ny4ap-intel-01} value=40.880121396054626 ], [ var='B' labels={host=ny4ap-intel-01} value=40.880121396054626 ], [ var='C' labels={host=ny4ap-intel-01} value=0 ]} {Instance:host=ny4ap-intel-02 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=ny4ap-intel-02 Value:0xc053795ef0} B:{Var:B Labels:host=ny4ap-intel-02 Value:0xc053795f30} C:{Var:C Labels:host=ny4ap-intel-02 Value:0xc053795f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.536995301s EvaluationString:[ var='A' labels={host=ny4ap-intel-02} value=40.81476493633693 ], [ var='B' labels={host=ny4ap-intel-02} value=40.81476493633693 ], [ var='C' labels={host=ny4ap-intel-02} value=0 ]} {Instance:host=ny4ap-intel-03 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=ny4ap-intel-03 Value:0xc053795fc0} B:{Var:B Labels:host=ny4ap-intel-03 Value:0xc053795ff0} C:{Var:C Labels:host=ny4ap-intel-03 Value:0xc0227c0010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.537003362s EvaluationString:[ var='A' labels={host=ny4ap-intel-03} value=123.00410688962964 ], [ var='B' labels={host=ny4ap-intel-03} value=123.00410688962964 ], [ var='C' labels={host=ny4ap-intel-03} value=0 ]}]" duration=24.46146ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kjf0fixt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.537385527Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-lax-us-016" t=2024-05-29T13:44:13.537388046Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj8izdof-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.537203265Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj8izdof-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.537152185Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-36" t=2024-05-29T13:44:13.537243265Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.537155984Z caller=remote_instance_store.go:51 user=344017 slug=descript msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.537098431Z caller=remote_instance_store.go:51 user=843304 slug=ppcgroup msg="calling SaveAlertInstance"
+Error parsing panelUID for alert annotationruleID838dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=344017 slug=descript version=3 fingerprint=8c9f3cf3a8d72b9e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.536975153Z level=debug msg="Alert rule evaluated" results="[{Instance:resource.label.project_id=production-273614, resource.type=k8s_container State:Normal Error: Results:map[] Values:map[Reduce:{Var:Reduce Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc01ce3b958} Threshold:{Var:Threshold Labels:resource.label.project_id=production-273614, resource.type=k8s_container Value:0xc01ce3b9c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.53662011s EvaluationString:[ var='Reduce' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0.171881640625 ], [ var='Threshold' labels={resource.label.project_id=production-273614, resource.type=k8s_container} value=0 ]}]" duration=550.175133ms
+level=debug ts=2024-05-29T13:44:13.536878363Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=843304 slug=ppcgroup t=2024-05-29T13:44:13.536986929Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=843304 slug=ppcgroup instance= t=2024-05-29T13:44:13.536954229Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=843304 slug=ppcgroup t=2024-05-29T13:44:13.536910028Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-32" t=2024-05-29T13:44:13.536587608Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj6k8c06-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.536127294Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.536347027Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj4zuh6v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535810641Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj4zuh6v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535789201Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj2kbr4q-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535535218Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj2kbr4q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535403677Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj2kbr4q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535367466Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj2kbr4q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535269745Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj22748b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535138474Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kj22748b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.535015153Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-30" t=2024-05-29T13:44:13.536181463Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-29" t=2024-05-29T13:44:13.536073576Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.535918103Z caller=client.go:80 msg="creating client for grafana instance" user=330459 addr=dns:///dgoodlad-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.535878182Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.535830112Z caller=ruler.go:522 msg="tenant is owned by this instance" user=360441 slug=barygame groups=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kiwv522s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.534656799Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kiwv522s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.534565498Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.535663118Z caller=client.go:80 msg="creating client for grafana instance" user=385603 addr=dns:///devs47-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kivbx8x4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.534271515Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kivbx8x4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.534170324Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kivbx8x4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.534029822Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.535681133Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kitqtqhu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.533887681Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.535588028Z caller=ruler.go:522 msg="tenant is owned by this instance" user=537138 slug=consultdanny groups=0
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kitqtqhu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.533724639Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kitqtqhu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.533699489Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kit2063l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.533628788Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kit2063l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.533490457Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kim0qey8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.532995942Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kiibjg9k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.53279877Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-26" t=2024-05-29T13:44:13.535601033Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kiibjg9k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.532489437Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ki7cp19h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.53180769Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ki3b2o6t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.531527537Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-25" t=2024-05-29T13:44:13.535455177Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ki2r3b9q-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.530967701Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ki2r3b9q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.530686878Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.535236504Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khxnpwwx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.530311624Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.535214943Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-24" t=2024-05-29T13:44:13.535250263Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.53518934Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khtza70i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.529789569Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.535177417Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khtuy0m1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.529512436Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.535158469Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khtuy0m1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.529393995Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khq4x91i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.529286274Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khq4x91i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.528980421Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-23" t=2024-05-29T13:44:13.535138567Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khdkvh4q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.528542716Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khboy4io-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.528411405Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khboy4io-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.528371764Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=214309 slug=spenmo instance="datasource_uid=grafanacloud-prom, ref_id=A,C" t=2024-05-29T13:44:13.534856113Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-21" t=2024-05-29T13:44:13.534849486Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.534542194Z caller=remote_instance_store.go:51 user=638425 slug=docktech msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.534698495Z caller=remote_image_capturer.go:54 user=80822 slug=corescientific rule_org_id=1 rule_uid=a5148bc0-ffe7-404f-97a5-992555e3fd8c dashboard=ARUwaiKGz panel=10 msg="rendering alert image with grafana"
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.534752935Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.53469357Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.534616493Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=80822 slug=corescientific instance= t=2024-05-29T13:44:13.534558014Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+logger=ngalert.state.manager user=80822 slug=corescientific instance= t=2024-05-29T13:44:13.534548662Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=80822 slug=corescientific t=2024-05-29T13:44:13.534512377Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=679831 slug=joveostageaws instance="datasource_uid=a6f971c0-2a39-492e-8c6a-8034f1702671, ref_id=A" t=2024-05-29T13:44:13.534336529Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.534307861Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-19" t=2024-05-29T13:44:13.534337687Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.534233272Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.534288385Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=679831 slug=joveostageaws t=2024-05-29T13:44:13.53428765Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.534134837Z caller=remote_instance_store.go:51 user=309009 slug=elestyle msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.534109023Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.534139401Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-18" t=2024-05-29T13:44:13.53418035Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.533995989Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.533967594Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.533965434Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=173730 slug=nikon t=2024-05-29T13:44:13.533836574Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:13.533815389Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:13.533782026Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.533736091Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=419587 slug=greenpass t=2024-05-29T13:44:13.53372243Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=419587 slug=greenpass instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.533709654Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=419587 slug=greenpass instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.533703895Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.scheduler user=419587 slug=greenpass version=70 fingerprint=77d586f7cb888200 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.533607479Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.533360535s EvaluationString:}]" duration=21.431743ms
+level=debug ts=2024-05-29T13:44:13.533486787Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.53344555Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-14" t=2024-05-29T13:44:13.533411074Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.533354562Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=491157 slug=prd01wr t=2024-05-29T13:44:13.533281002Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=491157 slug=prd01wr instance="datasource_uid=ddgrtm1lys268f, ref_id=A" t=2024-05-29T13:44:13.533261929Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.533249677Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=491157 slug=prd01wr t=2024-05-29T13:44:13.53324407Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=491157 slug=prd01wr version=1 fingerprint=58b0b8595f64574d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.533196432Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddgrtm1lys268f, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.53289311s EvaluationString:}]" duration=139.786143ms
+level=debug ts=2024-05-29T13:44:13.533203773Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.532965892Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.533153895Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.533139931Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.533102555Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-12" t=2024-05-29T13:44:13.533099718Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.533059131Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.532976857Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.532966737Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-10" t=2024-05-29T13:44:13.532771268Z level=debug msg="Setting next state" handler=resultNormal
+level=info ts=2024-05-29T13:44:13.532602827Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.145.122:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=WeP6yRPVz alerts=1
+logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.532525762Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=806229 slug=simplisafe instance= previous_handler=resultError t=2024-05-29T13:44:13.532508292Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=806229 slug=simplisafe instance= previous_handler=resultError t=2024-05-29T13:44:13.532498332Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+level=warn ts=2024-05-29T13:44:13.532444961Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=305126 slug=cloud0
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-08" t=2024-05-29T13:44:13.53247025Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.532412569Z caller=client.go:80 msg="creating client for grafana instance" user=464177 addr=dns:///dawidhomecloud-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=info component=discovery ts=2024-05-29T13:44:13.532371985Z caller=client.go:80 msg="creating client for grafana instance" user=399720 addr=dns:///davz-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-07" t=2024-05-29T13:44:13.532349516Z level=debug msg="Keeping state" state=Normal
+level=info ts=2024-05-29T13:44:13.531896277Z caller=remote_alert_sender.go:94 user=520892 slug=userled host=userled-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.156.43.120:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=d39e2590-dae3-4d9e-9fea-3f5462cef393 alerts=1
+level=info ts=2024-05-29T13:44:13.531876366Z caller=remote_alert_sender.go:94 user=520892 slug=userled host=userled-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.156.22.9:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=d39e2590-dae3-4d9e-9fea-3f5462cef393 alerts=1
+level=debug ts=2024-05-29T13:44:13.531703669Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=325783 slug=bloxprod t=2024-05-29T13:44:13.531687855Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=29.305247ms
+level=debug ts=2024-05-29T13:44:13.531731442Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.531615879Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jp-yo-02" t=2024-05-29T13:44:13.531651063Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=310637 slug=notino instance= t=2024-05-29T13:44:13.531509586Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.531532601Z caller=remote_instance_store.go:51 user=181845 slug=novol msg="calling SaveAlertInstance"
+level=info ts=2024-05-29T13:44:13.531528476Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.145.122:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=NO-esgPVk alerts=1
+level=info ts=2024-05-29T13:44:13.53147581Z caller=remote_alert_sender.go:94 user=288032 slug=dapperlabssre host=dapperlabssre-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.118.165:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=NO-esgPVk alerts=1
+level=debug ts=2024-05-29T13:44:13.531433472Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.531390828Z caller=client.go:80 msg="creating client for grafana instance" user=442451 addr=dns:///davidchoisysadm-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.531407264Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.531330588Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=88.645118ms
+level=info component=discovery ts=2024-05-29T13:44:13.531344757Z caller=client.go:80 msg="creating client for grafana instance" user=353936 addr=dns:///davemartinez-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=warn ts=2024-05-29T13:44:13.531322337Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=507163 slug=cbsanz
+level=debug ts=2024-05-29T13:44:13.531252751Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-096" t=2024-05-29T13:44:13.531282216Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.531153651Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-095" t=2024-05-29T13:44:13.53113432Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.531106046Z caller=client.go:80 msg="creating client for grafana instance" user=705776 addr=dns:///datacomcloudservices-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=warn ts=2024-05-29T13:44:13.531066471Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=299594 slug=bigmickyd73
+level=debug ts=2024-05-29T13:44:13.531051086Z caller=remote_instance_store.go:51 user=630233 slug=bettercloudprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.530982848Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.83293ms
+logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.530986202Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=39.17481ms
+level=info ts=2024-05-29T13:44:13.530816784Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.51.155:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adlo21w1fwq9sb alerts=1
+level=debug ts=2024-05-29T13:44:13.530904961Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.530948707Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.530899556Z caller=remote_instance_store.go:51 user=147497 slug=rhodev msg="calling SaveAlertInstance"
+level=info component=discovery ts=2024-05-29T13:44:13.530800557Z caller=client.go:80 msg="creating client for grafana instance" user=346066 addr=dns:///dashboardpina-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=info component=discovery ts=2024-05-29T13:44:13.530615291Z caller=client.go:80 msg="creating client for grafana instance" user=421206 addr=dns:///daltrey-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-092" t=2024-05-29T13:44:13.530733742Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.530608249Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=618621 slug=sendamatic t=2024-05-29T13:44:13.530644211Z level=debug msg="Saving alert states done" count=8 max_state_save_concurrency=1 duration=120.851663ms
+level=warn ts=2024-05-29T13:44:13.530615609Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=643898 slug=brendandoyle
+level=warn ts=2024-05-29T13:44:13.530542607Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=766149 slug=atomixlp
+level=debug ts=2024-05-29T13:44:13.530511436Z caller=ruler.go:522 msg="tenant is owned by this instance" user=438946 slug=afirmo groups=0
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-091" t=2024-05-29T13:44:13.530589651Z level=debug msg="Keeping state" state=Normal
+level=warn ts=2024-05-29T13:44:13.530389362Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=669219 slug=cmcdev
+level=debug ts=2024-05-29T13:44:13.530361015Z caller=ruler.go:522 msg="tenant is owned by this instance" user=618159 slug=atomixwegive groups=0
+logger=ngalert.state.manager user=374423 slug=bitburst instance= t=2024-05-29T13:44:13.530477383Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.530359299Z caller=ruler.go:522 msg="tenant is owned by this instance" user=669219 slug=cmcdev groups=0
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-089" t=2024-05-29T13:44:13.530231762Z level=debug msg="Setting next state" handler=resultNormal
+level=info component=discovery ts=2024-05-29T13:44:13.529749091Z caller=client.go:80 msg="creating client for grafana instance" user=303649 addr=dns:///cs1-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+level=debug ts=2024-05-29T13:44:13.529909496Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.530003898Z caller=remote_instance_store.go:51 user=338059 slug=ninetailed msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=338059 slug=ninetailed instance="DBInstanceIdentifier=zde9b64e5-postgresql" t=2024-05-29T13:44:13.529838771Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.529718551Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:13.529713653Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.529696318Z caller=ruler.go:522 msg="tenant is owned by this instance" user=316234 slug=admatt01 groups=0
+level=warn ts=2024-05-29T13:44:13.529658049Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=296643 slug=alexanderswift
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-085" t=2024-05-29T13:44:13.529570689Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=340750 slug=aptoslabs instance= t=2024-05-29T13:44:13.52947636Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=340750 slug=aptoslabs instance= t=2024-05-29T13:44:13.529469027Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=340750 slug=aptoslabs t=2024-05-29T13:44:13.529442685Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.529356976Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=337951 slug=pawapay instance= t=2024-05-29T13:44:13.529225208Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=337951 slug=pawapay t=2024-05-29T13:44:13.529175006Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=spectrum-cable, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.529208777Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.529143832Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.529050468Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-080" t=2024-05-29T13:44:13.52901336Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn, endpoint=NotifyPong, service=UserPresenceNotifier" t=2024-05-29T13:44:13.528878026Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-079" t=2024-05-29T13:44:13.528855133Z level=debug msg="Keeping state" state=Normal
+level=info component=discovery ts=2024-05-29T13:44:13.528806256Z caller=client.go:80 msg="creating client for grafana instance" user=357617 addr=dns:///axpopoc-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=rcn, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.52872013Z level=debug msg="Keeping state" state=Normal
+level=warn ts=2024-05-29T13:44:13.528657155Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=540683 slug=andrealfter
+level=debug ts=2024-05-29T13:44:13.528624755Z caller=ruler.go:522 msg="tenant is owned by this instance" user=540683 slug=andrealfter groups=0
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-078" t=2024-05-29T13:44:13.528671143Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-078" t=2024-05-29T13:44:13.528635001Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.528452095Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.528571968Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.528395293Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=533463 slug=lacewallet version=11 fingerprint=52a310d9da0d8c6b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.528391381Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.528093632s EvaluationString:}]" duration=16.214477ms
+level=warn ts=2024-05-29T13:44:13.528378452Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=740512 slug=acipayondev
+level=debug ts=2024-05-29T13:44:13.528354143Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.528314526Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.528304136Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-076" t=2024-05-29T13:44:13.52833172Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.52821109Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-khboy4io-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.528112872Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.528046109Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kh2xfiym-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.52795876Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.528067786Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-073" t=2024-05-29T13:44:13.527979654Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=optimumfixed, endpoint=NotifyPong, service=UserPresenceNotifier" t=2024-05-29T13:44:13.5279145Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-072" t=2024-05-29T13:44:13.527878641Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-071" t=2024-05-29T13:44:13.527763931Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.527425368Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.527417646Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kh2xfiym-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.527631487Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.527518414Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kh23qvdx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.527330414Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.527315918Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-068" t=2024-05-29T13:44:13.527351451Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kh23qvdx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.527189292Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-067" t=2024-05-29T13:44:13.527242602Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-067" t=2024-05-29T13:44:13.527210711Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, endpoint=NotifyConnection, service=UserPresenceNotifier" t=2024-05-29T13:44:13.52714921Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=jetblue, endpoint=NotifyConnection, service=UserPresenceNotifier" t=2024-05-29T13:44:13.527133539Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.527021256Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kh05wjm8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.526745898Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-066" t=2024-05-29T13:44:13.527055824Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=bffb092aed848c37 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.526869098Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=BEHRAIN Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc0105b9fd0} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc0105b9ef8} Threshold:{Var:Threshold Labels: Value:0xc0105b9f40} compare:{Var:compare Labels:aggregatedBy=sum, name=BEHRAIN Query Value:0xc0105b9f80} sum:{Var:sum Labels:aggregatedBy=sum, name=BEHRAIN Query Value:0xc0105b9fb8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.526632606s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=100 ], [ var='Threshold' labels={} value=-20 ], [ var='compare' labels={aggregatedBy=sum, name=BEHRAIN Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=BEHRAIN Query} value=0 ]}]" duration=151.589222ms
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishwireless, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.52701689Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-066" t=2024-05-29T13:44:13.527022814Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-west-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox" t=2024-05-29T13:44:13.526978195Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-west-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox" t=2024-05-29T13:44:13.526969964Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-east-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox" t=2024-05-29T13:44:13.526872202Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.526840838Z caller=ruler.go:522 msg="tenant is owned by this instance" user=738759 slug=alexandreandrefort groups=0
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-065" t=2024-05-29T13:44:13.526866543Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.526719737Z caller=remote_instance_store.go:51 user=512398 slug=brightdigital msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-sa-east-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox" t=2024-05-29T13:44:13.526753567Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kh05wjm8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.526578066Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.526642524Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-064" t=2024-05-29T13:44:13.526711201Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.526464523Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance"
+level=warn ts=2024-05-29T13:44:13.526605835Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=371182 slug=aldisouthtest
+logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox" t=2024-05-29T13:44:13.526561521Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.526467474Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgwkiavg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.526421384Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.526413958Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:13.526518464Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="cluster=firewalk-production" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishpostpaid, endpoint=NotifyConnection, service=UserPresenceNotifier" t=2024-05-29T13:44:13.526482255Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox" t=2024-05-29T13:44:13.526437017Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.526425012Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.526267601Z caller=remote_instance_store.go:51 user=208505 slug=liberachat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgwkiavg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.526249503Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.526326957Z caller=remote_instance_store.go:51 user=471861 slug=planetstaging msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.526318285Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dishget, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.526327618Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=426229 slug=accelbyte instance="__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-northeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox" t=2024-05-29T13:44:13.526290061Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.526271621Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgwkiavg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.526077621Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=530405 slug=zetetic t=2024-05-29T13:44:13.526131649Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.526107774Z caller=remote_instance_store.go:51 user=327842 
slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=426229 slug=accelbyte t=2024-05-29T13:44:13.526099522Z level=debug msg="State manager processing evaluation results" resultCount=7 + level=info component=discovery ts=2024-05-29T13:44:13.52602143Z caller=client.go:80 msg="creating client for grafana instance" user=314191 addr=dns:///axelavtestingstuff-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.scheduler user=426229 slug=accelbyte version=1 fingerprint=36896cd5550d0a6c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.525892814Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-northeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-northeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc050f05b88} C:{Var:C Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-northeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc050f05c48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525214334s EvaluationString:[ var='B' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-northeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=200 ], [ var='C' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-northeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=0 ]} {Instance:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc050f05dc8} C:{Var:C Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, 
blackbox_target=https://nomad-ap-southeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc050f05e68}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525235125s EvaluationString:[ var='B' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=200 ], [ var='C' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=0 ]} {Instance:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc050f05f88} C:{Var:C Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b8078}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525242618s EvaluationString:[ var='B' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=200 ], [ var='C' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-ap-southeast-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=0 ]} {Instance:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-eu-central-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, 
blackbox_target=https://nomad-eu-central-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b8358} C:{Var:C Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-eu-central-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b8218}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525250779s EvaluationString:[ var='B' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-eu-central-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=200 ], [ var='C' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-eu-central-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=0 ]} {Instance:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-sa-east-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-sa-east-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b85c8} C:{Var:C Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-sa-east-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b86a8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525260659s EvaluationString:[ var='B' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-sa-east-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=200 ], [ var='C' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-sa-east-1-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=0 ]} {Instance:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-east-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, 
environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-east-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b8838} C:{Var:C Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-east-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b88b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525267162s EvaluationString:[ var='B' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-east-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=200 ], [ var='C' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-east-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=0 ]} {Instance:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-west-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-west-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b8bb8} C:{Var:C Labels:__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-west-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox Value:0xc0220b8a48}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525272534s EvaluationString:[ var='B' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-west-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, job=integrations/blackbox} value=200 ], [ var='C' labels={__name__=probe_http_status_code, agent_hostname=ip-10-17-29-25.us-east-2.compute.internal:8080, blackbox_target=https://nomad-us-west-2-ui.prod.ams.firewalk.accelbyte.io/v1/status/leader, cluster=firewalk-production, environment=production, environment_name=firewalk-production, instance=blackbox, 
job=integrations/blackbox} value=0 ]}]" duration=50.312208ms + logger=ngalert.scheduler user=530405 slug=zetetic version=41 fingerprint=74ea129f018772a2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.52603324Z level=debug msg="Alert rule evaluated" results="[{Instance:chain=Polkadot, pool=Watermelon State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:chain=Polkadot, pool=Watermelon Value:0xc01b3d3ea0} C:{Var:C Labels:chain=Polkadot, pool=Watermelon Value:0xc01b3d3f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525695299s EvaluationString:[ var='B' labels={chain=Polkadot, pool=Watermelon} value=1 ], [ var='C' labels={chain=Polkadot, pool=Watermelon} value=0 ]}]" duration=6.143331ms + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.526063937Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-060" t=2024-05-29T13:44:13.526105571Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.52600999Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.525600504Z caller=remote_instance_store.go:51 user=115097 slug=controlplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dish, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.526052493Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgw2f94h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.52597888Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=dish, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.52599534Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=1d43eb3cdd6a3b46 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.525768962Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525578364s EvaluationString:}]" duration=203.480877ms + logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.525956344Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=82372 slug=fout t=2024-05-29T13:44:13.525919758Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=327842 slug=exabeam instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.525942911Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-059" t=2024-05-29T13:44:13.525913215Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgw2f94h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.525824358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgw2f94h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.525764008Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgw2f94h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.525661977Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgw2f94h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.525597506Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=NotifyPong, service=UserPresenceNotifier" t=2024-05-29T13:44:13.525656193Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=NotifyPong, service=UserPresenceNotifier" t=2024-05-29T13:44:13.525643156Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-057" t=2024-05-29T13:44:13.525632928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-057" t=2024-05-29T13:44:13.525606088Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=277970 slug=teckresourcestest t=2024-05-29T13:44:13.525527871Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=40.699175ms + logger=ngalert.scheduler user=452240 slug=trulioo version=90 fingerprint=4d6e31a2e979509a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.525500128Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.525221574s EvaluationString:}]" duration=48.040492ms + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=NotifyConnectionWithResponse, 
service=UserPresenceNotifier" t=2024-05-29T13:44:13.525510767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=NotifyConnection, service=UserPresenceNotifier" t=2024-05-29T13:44:13.52535942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=assurantlifestyle, endpoint=NotifyConnection, service=UserPresenceNotifier" t=2024-05-29T13:44:13.525347527Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.525261651Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-jfk-us-055" t=2024-05-29T13:44:13.525302213Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.525257046Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.525233383Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgthtsqe-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.525106291Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.52515598Z caller=remote_instance_store.go:51 user=527202 slug=lnrsusinsurancedev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=527202 slug=lnrsusinsurancedev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.525042405Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.525011736Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=49.930099ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgqc72pw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.524917999Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.525018249Z caller=remote_alert_sender.go:94 user=512940 slug=gruppoquattroits host=gruppoquattroits-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.79.125:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=-7Yb2mT4k alerts=1 + level=debug ts=2024-05-29T13:44:13.525045753Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.5249582Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=237629 slug=ocrolus t=2024-05-29T13:44:13.524991451Z level=debug 
msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=527202 slug=lnrsusinsurancedev version=40 fingerprint=c79d767e987ce7b1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.524892348Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.524449723s EvaluationString:}]" duration=17.596959ms + level=debug ts=2024-05-29T13:44:13.524973484Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, endpoint=NotifyPong, service=UserPresenceNotifier" t=2024-05-29T13:44:13.52491135Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=237629 slug=ocrolus version=29 fingerprint=fbfd92b82d7354f5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.524876096Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.524611731s EvaluationString:}]" duration=204.958177ms + logger=ngalert.state.manager.persist user=620731 slug=masonite t=2024-05-29T13:44:13.524849572Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.781993ms + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.524853882Z level=debug msg="Saving alert states" count=45 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.524824082Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.524853423Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.5247703Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgqc72pw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.524798938Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.52471803Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=american-airlines, endpoint=NotifyConnection, service=UserPresenceNotifier" t=2024-05-29T13:44:13.524591944Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=product.product-specification-0-server" t=2024-05-29T13:44:13.524525279Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.524489512Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.524463689Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=371756 slug=asapp instance="company_marker=aizhomesol, endpoint=NotifyConnectionWithResponse, service=UserPresenceNotifier" t=2024-05-29T13:44:13.524432281Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=product.product-offering-0-server" t=2024-05-29T13:44:13.524239376Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.52417425Z caller=remote_instance_store.go:51 user=21051 slug=mojio msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.524119112Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=630779 slug=anujsrivastava1 + logger=ngalert.state.manager.persist user=21051 slug=mojio t=2024-05-29T13:44:13.524146231Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=21051 slug=mojio instance="datasource_uid=grafanacloud-mojio, ref_id=A" t=2024-05-29T13:44:13.524133802Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=21051 slug=mojio t=2024-05-29T13:44:13.524088132Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgn4jo71-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.523850528Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=21051 slug=mojio version=2 fingerprint=ee221d512c7bef3a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.524030074Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-mojio, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.523746877s EvaluationString:}]" duration=30.974571ms + level=debug ts=2024-05-29T13:44:13.523898838Z caller=ruler.go:522 msg="tenant is owned by this instance" user=314055 slug=axiefriends groups=1 + level=debug ts=2024-05-29T13:44:13.523725367Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.523804739Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.523736532Z caller=ruler.go:522 msg="tenant is owned by this instance" user=540513 slug=afk47org groups=0 + level=debug ts=2024-05-29T13:44:13.523729697Z caller=remote_instance_store.go:51 user=795224 slug=gannettdigital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgf1r54l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.523567115Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgf1r54l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.523443134Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=product.loyalty-synchronization-0-server" t=2024-05-29T13:44:13.523433467Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-iah-us-003" t=2024-05-29T13:44:13.523370414Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.520888681Z caller=ruler.go:522 msg="tenant is owned by this instance" user=539399 slug=alterplan groups=0 + logger=ngalert.state.manager.persist user=482192 slug=the55group t=2024-05-29T13:44:13.52322621Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=482192 slug=the55group instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.523214862Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-iah-us-002" t=2024-05-29T13:44:13.523264072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-iah-us-002" t=2024-05-29T13:44:13.523250361Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.523165803Z caller=ruler.go:522 msg="tenant is owned by this instance" user=538019 slug=althouse groups=0 + logger=ngalert.state.manager user=286924 slug=kmpdashboard instance= t=2024-05-29T13:44:13.523208947Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=286924 slug=kmpdashboard t=2024-05-29T13:44:13.523167737Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgeo1f4c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.522945999Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-iah-us-001" t=2024-05-29T13:44:13.523145373Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=product.loyalty-management-0-server" t=2024-05-29T13:44:13.523084963Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.522597698Z caller=client.go:80 msg="creating client for grafana instance" user=307450 addr=dns:///asosltd-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=334408 slug=voltagrid t=2024-05-29T13:44:13.522909593Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-hnd-jp-063" 
t=2024-05-29T13:44:13.522903121Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.522954499Z caller=remote_instance_store.go:51 user=334408 slug=voltagrid msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.522873043Z caller=remote_image_capturer.go:33 user=334408 slug=voltagrid rule_org_id=1 rule_uid=YHueUVaVk msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager.persist user=326874 slug=fastpath t=2024-05-29T13:44:13.522788171Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-hnd-jp-062" t=2024-05-29T13:44:13.522761561Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=326874 slug=fastpath version=3 fingerprint=8a425ce930c0aaf4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.52257454Z level=debug msg="Alert rule evaluated" results="[{Instance:alert_sensitivity=high, instance=https://connector-oracleebs-cus.dev.gofastpath.com/api/health, job=http-check-connector-oracleebs-cus-dev State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:alert_sensitivity=high, instance=https://connector-oracleebs-cus.dev.gofastpath.com/api/health, job=http-check-connector-oracleebs-cus-dev Value:0xc02404e320} C:{Var:C Labels:alert_sensitivity=high, instance=https://connector-oracleebs-cus.dev.gofastpath.com/api/health, job=http-check-connector-oracleebs-cus-dev Value:0xc02404e340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.522146538s EvaluationString:[ var='A' labels={alert_sensitivity=high, instance=https://connector-oracleebs-cus.dev.gofastpath.com/api/health, job=http-check-connector-oracleebs-cus-dev} value=100 ], [ var='C' labels={alert_sensitivity=high, instance=https://connector-oracleebs-cus.dev.gofastpath.com/api/health, job=http-check-connector-oracleebs-cus-dev} value=0 ]}]" duration=23.712735ms + level=debug ts=2024-05-29T13:44:13.522727604Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=party.party-synchronization-0-server" t=2024-05-29T13:44:13.522657258Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.522589197Z caller=ruler.go:522 msg="tenant is owned by this instance" user=486932 slug=alkali groups=0 + level=debug ts=2024-05-29T13:44:13.522650975Z caller=remote_instance_store.go:51 user=174016 slug=journalstaging msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.522644693Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-hnd-jp-061" t=2024-05-29T13:44:13.522600877Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-hnd-jp-061" t=2024-05-29T13:44:13.52257816Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.522582136Z caller=remote_instance_store.go:51 user=4947 slug=mediamath msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.522512128Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:13.522463824Z level=debug msg="State 
manager processing evaluation results" resultCount=3 + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=party.party-id-management-0-server" t=2024-05-29T13:44:13.522488957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=4947 slug=mediamath t=2024-05-29T13:44:13.522472391Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=party.party-id-management-0-server" t=2024-05-29T13:44:13.522475556Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.522419698Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.522321634Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=party.party-authentication-0-server" t=2024-05-29T13:44:13.522315555Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-hnd-jp-060" t=2024-05-29T13:44:13.522276268Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.522161011Z caller=remote_instance_store.go:51 user=796993 slug=marketops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-hnd-jp-059" t=2024-05-29T13:44:13.522122248Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.519404767Z caller=ruler.go:522 msg="tenant is owned by this instance" user=792996 slug=algoterm groups=0 + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=mss-consumer-pro1-server" t=2024-05-29T13:44:13.521750448Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=mss-consumer-pro1-server" t=2024-05-29T13:44:13.521738448Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:13.52175591Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.521642273Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.52155523Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.521419302Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=150145 slug=pleasant version=4 fingerprint=01fa1ac607f5142c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.521378008Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.521091329s EvaluationString:}]" duration=15.923387ms + level=debug ts=2024-05-29T13:44:13.521352792Z caller=remote_instance_store.go:51 user=201790 slug=veedmo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.521359826Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.711695ms + logger=ngalert.state.manager user=612525 slug=adleyeview 
instance="consumer_group_id=mobile-data-recorder-0-server" t=2024-05-29T13:44:13.521356944Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.521213543Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=mobile-backend-instance-prod1-server" t=2024-05-29T13:44:13.521141042Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.521107689Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=mobile-backend-instance-prod-extend4-server" t=2024-05-29T13:44:13.520895639Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-fra-de-009" t=2024-05-29T13:44:13.520722418Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=mobile-backend-instance-prod-extend2-server" t=2024-05-29T13:44:13.520531035Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-fra-de-007" t=2024-05-29T13:44:13.520340679Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=mobile-backend-instance-prod-extend1-server" t=2024-05-29T13:44:13.520175531Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.520129676Z caller=remote_instance_store.go:51 user=442863 slug=numan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.520047199Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=442863 slug=numan version=6 fingerprint=8a7ba0188ff1a5e6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.519874618Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.519500537s EvaluationString:}]" duration=19.473034ms + logger=ngalert.state.manager.persist user=349246 slug=metricgamingdev t=2024-05-29T13:44:13.519844116Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=53.273278ms + logger=ngalert.state.manager user=895137 slug=uid2 instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:13.519865949Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=895137 slug=uid2 instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:13.519860379Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=895137 slug=uid2 instance="datasource_uid=grafanacloud-prom, ref_id=QUERY" t=2024-05-29T13:44:13.519845388Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=Ejs1P5xVk, ref_id=A" t=2024-05-29T13:44:13.519834674Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.519813483Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy 
msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=895137 slug=uid2 version=42 fingerprint=26b8125d734b3a1a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.519756395Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=QUERY State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.519397297s EvaluationString:}]" duration=31.231511ms + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.519574433Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=prod-playlive-card-data-synchronise-20220707100444096800000007" t=2024-05-29T13:44:13.519564211Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.519399261Z caller=client.go:80 msg="creating client for grafana instance" user=662032 addr=dns:///clipsalsolar-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.519400444Z caller=ruler.go:522 msg="tenant is owned by this instance" user=713442 slug=anaplannextnonprodau groups=0 + level=debug ts=2024-05-29T13:44:13.519441061Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-fra-de-004" t=2024-05-29T13:44:13.519445112Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-fra-de-003" t=2024-05-29T13:44:13.519324662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:13.519322529Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:13.51930974Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:13.519127107Z caller=remote_instance_store.go:51 user=177465 slug=fairtiq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.519148444Z caller=ruler.go:522 msg="tenant is owned by this instance" user=315013 slug=boileau groups=0 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-fra-de-002" t=2024-05-29T13:44:13.519201231Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.518975079Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.51891396Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=engagedParty.payment-methods-0-server" t=2024-05-29T13:44:13.518835217Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dub-ie-003" t=2024-05-29T13:44:13.518768035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.518717309Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=679029 slug=joveoprodaws 
instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.518700849Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.518731563Z caller=remote_instance_store.go:51 user=223160 slug=saince msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=223160 slug=saince instance="datasource_uid=Ft0ZgCR7z, ref_id=A" t=2024-05-29T13:44:13.518639697Z level=warn msg="Failed to take an image" dashboard=CZZzGoHVz panel=348 error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.518581246Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.518557969Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=1b804f5e6f6a7ef3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.518466709Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.518191166s EvaluationString:}]" duration=159.949591ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dub-ie-001" t=2024-05-29T13:44:13.51851588Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-den-us-009" t=2024-05-29T13:44:13.518210713Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.518134276Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.518049658Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.518129977Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.518122329Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.518111273Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=eacs-consumer-prod3-server" t=2024-05-29T13:44:13.518022008Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:13.517899575Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.326017ms + level=debug ts=2024-05-29T13:44:13.517861346Z caller=remote_instance_store.go:51 user=60603 slug=avalaratax msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-den-us-006" t=2024-05-29T13:44:13.517835925Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=223160 slug=saince instance="datasource_uid=Ft0ZgCR7z, ref_id=A" t=2024-05-29T13:44:13.517561842Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=eacs-consumer-prod2-server" t=2024-05-29T13:44:13.517680104Z level=debug msg="Keeping state" state=Normal 
+ logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=eacs-consumer-prod2-server" t=2024-05-29T13:44:13.517658604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=223160 slug=saince t=2024-05-29T13:44:13.517534655Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=eacs-consumer-prod1-server" t=2024-05-29T13:44:13.5172842Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=eacs-consumer-prod1-server" t=2024-05-29T13:44:13.517260499Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-040" t=2024-05-29T13:44:13.517289483Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.517145846Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=customer.product-order-0-server" t=2024-05-29T13:44:13.517054397Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-039" t=2024-05-29T13:44:13.517175536Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=customer.product-order-0-server" t=2024-05-29T13:44:13.517020497Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.516983727Z caller=client.go:80 msg="creating client for grafana instance" user=353453 addr=dns:///chipserver-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-038" t=2024-05-29T13:44:13.517052213Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-038" t=2024-05-29T13:44:13.517039954Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-037" t=2024-05-29T13:44:13.516917697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=customer.payment-management-0-server" t=2024-05-29T13:44:13.516736994Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.516600386Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.516629974Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=85008 slug=kalypsolp instance= t=2024-05-29T13:44:13.516626574Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-035" t=2024-05-29T13:44:13.516667783Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-033" t=2024-05-29T13:44:13.51639335Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=856040 slug=kuady t=2024-05-29T13:44:13.516339555Z level=debug 
msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.516241256Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.516331333Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.516227568Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.516299831Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=460305 slug=betwatch + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-032" t=2024-05-29T13:44:13.516274866Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.516201391Z caller=ruler.go:522 msg="tenant is owned by this instance" user=663364 slug=belmontnorth groups=0 + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=34384ee77cd3ebd2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.516139701Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.51596446s EvaluationString:}]" duration=447.515705ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-031" t=2024-05-29T13:44:13.516165054Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.515999221Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-030" t=2024-05-29T13:44:13.516041139Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.515957143Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=687021 slug=heviai t=2024-05-29T13:44:13.515899538Z level=debug msg="Saving alert states done" count=20 max_state_save_concurrency=1 duration=325.027912ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-029" t=2024-05-29T13:44:13.515918045Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=common.party-role-0-server" t=2024-05-29T13:44:13.515827884Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-dca-us-028" t=2024-05-29T13:44:13.515755712Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=642786 slug=sophoscomnsg t=2024-05-29T13:44:13.515679703Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=642786 slug=sophoscomnsg version=5 fingerprint=17c384face0aba6e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.515604572Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc03c566c10} B:{Var:B Labels: Value:0xc03c566c18} C:{Var:C Labels: Value:0xc03c566c20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.515206205s EvaluationString:[ var='A' labels={} value=1.6235845839018015 ], [ var='B' labels={} value=1.6235845839018015 ], [ var='C' labels={} value=0 ]}]" duration=12.551277ms + logger=ngalert.state.manager user=612525 slug=adleyeview 
instance="consumer_group_id=common.entity-catalog-management-0-server" t=2024-05-29T13:44:13.51549198Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.515566496Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.51540802Z caller=remote_instance_store.go:51 user=548157 slug=kushkiprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-cdg-fr-069" t=2024-05-29T13:44:13.51530464Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=common.communication.subscribe-0-server" t=2024-05-29T13:44:13.515273578Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.514913068Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=444728 slug=stgnextgen instance= t=2024-05-29T13:44:13.515211514Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.515107507Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-cdg-fr-066" t=2024-05-29T13:44:13.514887664Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.514886535Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=common.communication.pushindividual-0-server" t=2024-05-29T13:44:13.514910874Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.514727355Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.514652034Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=807219 slug=felipepinheiro186 t=2024-05-29T13:44:13.514524364Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info component=discovery ts=2024-05-29T13:44:13.514569175Z caller=client.go:80 msg="creating client for grafana instance" user=750138 addr=dns:///cassius-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-cdg-fr-063" t=2024-05-29T13:44:13.514514966Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.514416227Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.514304994Z caller=client.go:80 msg="creating client for grafana instance" user=304227 addr=dns:///captortw-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.514399704Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance" + level=warn ts=2024-05-29T13:44:13.514272924Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=525225 slug=adjuvant + level=debug ts=2024-05-29T13:44:13.514337401Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + 
level=debug ts=2024-05-29T13:44:13.514259398Z caller=ruler.go:522 msg="tenant is owned by this instance" user=338951 slug=bifrostai groups=0 + level=debug ts=2024-05-29T13:44:13.514247443Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:13.514246994Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=301974 slug=arguspreprod t=2024-05-29T13:44:13.514104583Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=MFIS-0-server" t=2024-05-29T13:44:13.514198266Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.514164857Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.514172949Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-bru-be-002" t=2024-05-29T13:44:13.514168339Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.514069559Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=612525 slug=adleyeview instance="consumer_group_id=IACS-devlocal1-server" t=2024-05-29T13:44:13.513781962Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.513699521Z caller=ruler.go:522 msg="tenant is owned by this instance" user=333378 slug=bojanaus groups=0 + level=debug ts=2024-05-29T13:44:13.513707586Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=390300 slug=astrachain t=2024-05-29T13:44:13.513662992Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.513651329Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.51361129Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-arn-se-005" t=2024-05-29T13:44:13.513623126Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.513572787Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.513562778Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-arn-se-004" t=2024-05-29T13:44:13.513443112Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.51318444Z caller=client.go:80 msg="creating client for grafana instance" user=657273 addr=dns:///canberragiro-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=warn ts=2024-05-29T13:44:13.513160982Z caller=ruler.go:575 msg="user has no rule groups, ignoring" user=356745 slug=asia + level=debug ts=2024-05-29T13:44:13.512986027Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.513132525Z 
caller=ruler.go:522 msg="tenant is owned by this instance" user=356745 slug=asia groups=0 + level=debug ts=2024-05-29T13:44:13.513095807Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-arn-se-001" t=2024-05-29T13:44:13.512907387Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.512695532Z caller=remote_instance_store.go:51 user=512940 slug=gruppoquattroits msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=512940 slug=gruppoquattroits t=2024-05-29T13:44:13.51263932Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=512940 slug=gruppoquattroits instance="datasource_uid=nxIk2Y24k, ref_id=A" t=2024-05-29T13:44:13.512619174Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.512537157Z caller=remote_instance_store.go:51 user=795224 slug=gannettdigital msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.512534547Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-ams-nl-05" t=2024-05-29T13:44:13.512558586Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=70430 slug=dapperlabs instance= t=2024-05-29T13:44:13.512342817Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.512284668Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.511902705Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.511846104Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-ams-nl-03" t=2024-05-29T13:44:13.51206968Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.511773829Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.511625646Z caller=remote_instance_store.go:51 user=253106 slug=elenasmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=dp-ams-nl-01" t=2024-05-29T13:44:13.511629304Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=253106 slug=elenasmonitor instance= t=2024-05-29T13:44:13.511554553Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + level=debug ts=2024-05-29T13:44:13.511529276Z caller=remote_instance_store.go:51 user=430961 slug=solifi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:13.511488206Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.300703ms + logger=ngalert.state.manager user=253106 slug=elenasmonitor instance= t=2024-05-29T13:44:13.511546027Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=253106 slug=elenasmonitor t=2024-05-29T13:44:13.511518845Z level=debug msg="State 
manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.511463028Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.511279067Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-phx-us-005" t=2024-05-29T13:44:13.511299599Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.511108128Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-phx-us-004" t=2024-05-29T13:44:13.51114681Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-phx-us-001" t=2024-05-29T13:44:13.510776818Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-020" t=2024-05-29T13:44:13.510642514Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-019" t=2024-05-29T13:44:13.51052925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=922741 slug=johnnyleeothon instance= t=2024-05-29T13:44:13.510462316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=922741 slug=johnnyleeothon t=2024-05-29T13:44:13.510348533Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-018" t=2024-05-29T13:44:13.510310736Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.510187248Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.510124021Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-015" t=2024-05-29T13:44:13.509873261Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-015" t=2024-05-29T13:44:13.509857266Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.509733617Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-014" t=2024-05-29T13:44:13.509670273Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.509547835Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.509552364Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-013" t=2024-05-29T13:44:13.509552759Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.509499188Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.509454562Z caller=remote_instance_store.go:51 
user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.509409507Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.509364051Z caller=remote_instance_store.go:51 user=447897 slug=mysten msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.509210275Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-012" t=2024-05-29T13:44:13.509261667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.509216146Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.109231ms + level=debug ts=2024-05-29T13:44:13.509103841Z caller=remote_instance_store.go:51 user=620731 slug=masonite msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-010" t=2024-05-29T13:44:13.508954601Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-008" t=2024-05-29T13:44:13.50869483Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-008" t=2024-05-29T13:44:13.508680855Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-007" t=2024-05-29T13:44:13.508562501Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=513734 slug=rgitsolutions t=2024-05-29T13:44:13.508205364Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=46.886386ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-004" t=2024-05-29T13:44:13.508150904Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-004" t=2024-05-29T13:44:13.508136421Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.508058445Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.507932608Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-003" t=2024-05-29T13:44:13.507980233Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=112732 slug=gleamer t=2024-05-29T13:44:13.507671274Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.507756002Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lgw-uk-002" t=2024-05-29T13:44:13.507802146Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.507766421Z caller=remote_instance_store.go:51 user=461798 slug=betfair msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=461798 slug=betfair instance="datasource_uid=bdesiazx5kxkwc, ref_id=A" t=2024-05-29T13:44:13.507702722Z level=debug msg="Execution no 
data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=461798 slug=betfair t=2024-05-29T13:44:13.50763741Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.507452467Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=479692 slug=dealstack version=3 fingerprint=26487ab7cafb6aa0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.507482611Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=dealstack-prod State:Normal Error: Results:map[] Values:map[rds_free_memory_min:{Var:rds_free_memory_min Labels:DBInstanceIdentifier=dealstack-prod Value:0xc0432df868} rds_free_memory_min_mb:{Var:rds_free_memory_min_mb Labels:DBInstanceIdentifier=dealstack-prod Value:0xc0432df890} rds_free_memory_min_mb_threshold:{Var:rds_free_memory_min_mb_threshold Labels:DBInstanceIdentifier=dealstack-prod Value:0xc0432df898}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.507069008s EvaluationString:[ var='rds_free_memory_min' labels={DBInstanceIdentifier=dealstack-prod} value=9.15443712e+08 ], [ var='rds_free_memory_min_mb' labels={DBInstanceIdentifier=dealstack-prod} value=915.443712 ], [ var='rds_free_memory_min_mb_threshold' labels={DBInstanceIdentifier=dealstack-prod} value=0 ]}]" duration=32.828277ms + level=debug ts=2024-05-29T13:44:13.507443734Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.507447802Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.50738462Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-015" t=2024-05-29T13:44:13.507294272Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-015" t=2024-05-29T13:44:13.507259446Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=520892 slug=userled version=6 fingerprint=d95c2e2e2b93535f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.507162597Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.506804698s EvaluationString:}]" duration=17.736152ms + level=debug ts=2024-05-29T13:44:13.506969915Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-012" t=2024-05-29T13:44:13.506911792Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.506458304Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.506415503Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=935198 slug=provable t=2024-05-29T13:44:13.506771092Z level=debug msg="Saving alert states done" count=7 max_state_save_concurrency=1 duration=81.855004ms + level=info ts=2024-05-29T13:44:13.506577417Z caller=grafana.go:247 
user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=373 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-009" t=2024-05-29T13:44:13.506504856Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-008" t=2024-05-29T13:44:13.506292434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-007" t=2024-05-29T13:44:13.506179677Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.506108813Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=256 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-006" t=2024-05-29T13:44:13.506030187Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance="DBInstanceIdentifier=db-enduser-verification-120230202174313780600000009" t=2024-05-29T13:44:13.50597126Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.505964503Z caller=remote_instance_store.go:51 user=739013 slug=altoglobalsharing msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-006" t=2024-05-29T13:44:13.506018891Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.505881635Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-lax-us-005" t=2024-05-29T13:44:13.505901249Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=114492 slug=railsbank version=3 fingerprint=508e067b952b8594 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.505757683Z level=debug msg="Alert rule evaluated" results="[{Instance:DBInstanceIdentifier=db-enduser-verification-120230202174313780600000009 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBInstanceIdentifier=db-enduser-verification-120230202174313780600000009 Value:0xc022212cf8} C:{Var:C Labels:DBInstanceIdentifier=db-enduser-verification-120230202174313780600000009 Value:0xc022212d00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.505311063s EvaluationString:[ var='B' labels={DBInstanceIdentifier=db-enduser-verification-120230202174313780600000009} value=0 ], [ var='C' labels={DBInstanceIdentifier=db-enduser-verification-120230202174313780600000009} value=0 ]}]" duration=124.978098ms + level=info component=discovery ts=2024-05-29T13:44:13.504513327Z caller=client.go:80 msg="creating client for grafana instance" user=494971 addr=dns:///adaptera-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.505587957Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.870272ms + level=info ts=2024-05-29T13:44:13.50550599Z caller=remote_alert_sender.go:94 user=746993 slug=pegacloud host=pegacloud-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.87.30:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adk6o3dktoc1sb alerts=1 + level=info 
component=discovery ts=2024-05-29T13:44:13.505312434Z caller=client.go:80 msg="creating client for grafana instance" user=537747 addr=dns:///aqsmed-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.504785429Z caller=client.go:80 msg="creating client for grafana instance" user=645505 addr=dns:///alidehkharqani-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.504330025Z caller=client.go:80 msg="creating client for grafana instance" user=531167 addr=dns:///aaebvreports-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.505131243Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.505148805Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-fra-de-005" t=2024-05-29T13:44:13.505122491Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=523054 slug=vialtopartners t=2024-05-29T13:44:13.505108916Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info component=discovery ts=2024-05-29T13:44:13.504340725Z caller=client.go:80 msg="creating client for grafana instance" user=528756 addr=dns:///absbv-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.50483423Z caller=client.go:80 msg="creating client for grafana instance" user=559754 addr=dns:///alopem-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.504726429Z caller=client.go:80 msg="creating client for grafana instance" user=678409 addr=dns:///alejndr0-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.504796329Z caller=client.go:80 msg="creating client for grafana instance" user=486932 addr=dns:///alkali-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.504661028Z caller=client.go:80 msg="creating client for grafana instance" user=503394 addr=dns:///aivf-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.504632128Z caller=client.go:80 msg="creating client for grafana instance" user=729661 addr=dns:///airbusatlanticval-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=602335 slug=gcbgrupo t=2024-05-29T13:44:13.504616888Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=449554 slug=metricgamingppe t=2024-05-29T13:44:13.50463956Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1 + logger=ngalert.state.manager user=449554 slug=metricgamingppe instance="DBInstanceIdentifier=rds-trading" 
t=2024-05-29T13:44:13.504623936Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.504505827Z caller=client.go:80 msg="creating client for grafana instance" user=708062 addr=dns:///acipayonload-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=449554 slug=metricgamingppe instance="DBInstanceIdentifier=rds-tenant-comeon-nl" t=2024-05-29T13:44:13.504482451Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-117" t=2024-05-29T13:44:13.504433801Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.504389925Z caller=client.go:80 msg="creating client for grafana instance" user=690978 addr=dns:///acidecathlonnpd-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.504383425Z caller=client.go:80 msg="creating client for grafana instance" user=517273 addr=dns:///acif-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-116" t=2024-05-29T13:44:13.504281257Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.504219024Z caller=ruler.go:498 msg="number of tenants owned by this instance" owned=859 total=1269 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-116" t=2024-05-29T13:44:13.504261965Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.504195274Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-115" t=2024-05-29T13:44:13.504100264Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=696798 slug=mcv t=2024-05-29T13:44:13.504047412Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=449554 slug=metricgamingppe instance="DBInstanceIdentifier=rds-ingestion" t=2024-05-29T13:44:13.503930683Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.503804327Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-113" t=2024-05-29T13:44:13.503816095Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=150145 slug=pleasant t=2024-05-29T13:44:13.503767111Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.894322ms + logger=ngalert.scheduler user=127813 slug=clearsale version=4 fingerprint=787e66b94127a132 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.503720712Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.503386849s EvaluationString:}]" duration=205.342296ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-112" t=2024-05-29T13:44:13.503664942Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist 
user=90284 slug=volantio t=2024-05-29T13:44:13.503664027Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-111" t=2024-05-29T13:44:13.503518274Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-110" t=2024-05-29T13:44:13.503375077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=532653 slug=chathamdirectprd t=2024-05-29T13:44:13.503146029Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.649317ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-109" t=2024-05-29T13:44:13.503230927Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-107" t=2024-05-29T13:44:13.502979431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-107" t=2024-05-29T13:44:13.502963925Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.502840084Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=636670 slug=generacces instance= t=2024-05-29T13:44:13.502463552Z level=debug msg="Keeping state" state=Error previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager.persist user=325783 slug=bloxprod t=2024-05-29T13:44:13.502379194Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.scheduler user=636670 slug=generacces version=11 fingerprint=e06a31158b512599 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.502354161Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.636883ms + level=error ts=2024-05-29T13:44:13.50230264Z caller=remote_rule_evaluator.go:110 user=636670 slug=generacces msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + level=debug ts=2024-05-29T13:44:13.502297427Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.502168636Z caller=remote_alert_sender.go:94 user=630233 slug=bettercloudprod host=bettercloudprod-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.157.113:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=ba0e64de-37e3-4dba-a697-2b2f80d3d2ea alerts=1 + level=debug ts=2024-05-29T13:44:13.502053413Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ewr-us-104" t=2024-05-29T13:44:13.502175261Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.501921594Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.237241ms + level=debug ts=2024-05-29T13:44:13.501781251Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.501765491Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist 
user=316418 slug=workmotion t=2024-05-29T13:44:13.501571508Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=316418 slug=workmotion instance="ApiId=1mr10216z5, Method=--, Resource=/v1/payroll-funding, Stage=--" t=2024-05-29T13:44:13.501540634Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=316418 slug=workmotion t=2024-05-29T13:44:13.50150432Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-015" t=2024-05-29T13:44:13.501522203Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-015" t=2024-05-29T13:44:13.501510615Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-014" t=2024-05-29T13:44:13.501386959Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-013" t=2024-05-29T13:44:13.50127871Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-013" t=2024-05-29T13:44:13.501269196Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.501165677Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-012" t=2024-05-29T13:44:13.50115972Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.501004981Z caller=remote_instance_store.go:51 user=548157 slug=kushkiprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-011" t=2024-05-29T13:44:13.501027459Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.500789097Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=316418 slug=workmotion t=2024-05-29T13:44:13.500848493Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=29.934783ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-010" t=2024-05-29T13:44:13.500927587Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-009" t=2024-05-29T13:44:13.500806772Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-008" t=2024-05-29T13:44:13.500661467Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.500550384Z caller=remote_instance_store.go:51 user=243675 slug=oneschema msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.500585524Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-007" t=2024-05-29T13:44:13.500562717Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.500500866Z level=debug msg="Saving 
alert states done" count=1 max_state_save_concurrency=1 duration=9.391785ms + level=debug ts=2024-05-29T13:44:13.50052142Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.500508777Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-006" t=2024-05-29T13:44:13.500450657Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.500305394Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.500332967Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-005" t=2024-05-29T13:44:13.50031383Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.500119113Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.500197349Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=767797 slug=mgmresorts t=2024-05-29T13:44:13.500183383Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.500241133Z caller=remote_instance_store.go:51 user=767797 slug=mgmresorts msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.500232081Z caller=remote_alert_sender.go:94 user=199295 slug=flexpoolio host=flexpoolio-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.26.226:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a5ba8f0f-7da4-4b19-96ab-d83da84a42f2 alerts=1 + logger=ngalert.state.manager user=767797 slug=mgmresorts instance= t=2024-05-29T13:44:13.500151452Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.500087743Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-004" t=2024-05-29T13:44:13.500204345Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.500169891Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.500072587Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=543660 slug=jobcloudprogrammaticstage t=2024-05-29T13:44:13.500042805Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.500115419Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.499595974Z caller=client.go:80 msg="creating client for grafana instance" user=661118 addr=dns:///boylegiro-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery 
ts=2024-05-29T13:44:13.499557046Z caller=client.go:80 msg="creating client for grafana instance" user=415341 addr=dns:///boxfish-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.499459207Z caller=client.go:80 msg="creating client for grafana instance" user=333894 addr=dns:///bortana-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.499372218Z caller=client.go:80 msg="creating client for grafana instance" user=344576 addr=dns:///blairm-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.49927246Z caller=client.go:80 msg="creating client for grafana instance" user=344582 addr=dns:///bimap-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.499025736Z caller=client.go:80 msg="creating client for grafana instance" user=766149 addr=dns:///atomixlp-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-atl-us-002" t=2024-05-29T13:44:13.49995974Z level=debug msg="Keeping state" state=Normal + level=info component=discovery ts=2024-05-29T13:44:13.498905905Z caller=client.go:80 msg="creating client for grafana instance" user=296643 addr=dns:///alexanderswift-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.499187222Z caller=client.go:80 msg="creating client for grafana instance" user=338951 addr=dns:///bifrostai-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.49990035Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.49985174Z caller=remote_instance_store.go:51 user=514500 slug=rever msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.499048691Z caller=client.go:80 msg="creating client for grafana instance" user=314055 addr=dns:///axiefriends-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.499734727Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=info component=discovery ts=2024-05-29T13:44:13.499020912Z caller=client.go:80 msg="creating client for grafana instance" user=525539 addr=dns:///ashleyalanrodgerson-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager.persist user=514500 slug=rever t=2024-05-29T13:44:13.499767006Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.499663349Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=514500 slug=rever instance="datasource_uid=grafanacloud-graphite, ref_id=A" t=2024-05-29T13:44:13.49973953Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager 
user=514500 slug=rever instance="datasource_uid=grafanacloud-graphite, ref_id=A" t=2024-05-29T13:44:13.499717278Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=514500 slug=rever instance="datasource_uid=grafanacloud-graphite, ref_id=A" t=2024-05-29T13:44:13.499709804Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:13.498911452Z caller=client.go:80 msg="creating client for grafana instance" user=305169 addr=dns:///amarbank-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=514500 slug=rever instance="datasource_uid=grafanacloud-graphite, ref_id=A" t=2024-05-29T13:44:13.49970162Z level=debug msg="Setting next state" handler=resultNoData + level=info component=discovery ts=2024-05-29T13:44:13.49889899Z caller=client.go:80 msg="creating client for grafana instance" user=384361 addr=dns:///alfiankan-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=info component=discovery ts=2024-05-29T13:44:13.498856486Z caller=client.go:80 msg="creating client for grafana instance" user=471966 addr=dns:///akahu-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + logger=ngalert.state.manager user=514500 slug=rever instance="datasource_uid=grafanacloud-graphite, ref_id=A" t=2024-05-29T13:44:13.499656713Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=_KDw8Zn4z, ref_id=A" t=2024-05-29T13:44:13.499395989Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.499451428Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ams-nl-006" t=2024-05-29T13:44:13.499477661Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.499456602Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=998a33c6a6fa821a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.499308684Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=_KDw8Zn4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.499031863s EvaluationString:}]" duration=35.045483ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ams-nl-005" t=2024-05-29T13:44:13.499348554Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ams-nl-005" t=2024-05-29T13:44:13.499335434Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.499030901Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=clouv-ams-nl-003" t=2024-05-29T13:44:13.499092743Z level=debug msg="Setting next state" handler=resultNormal + level=info component=discovery ts=2024-05-29T13:44:13.49877313Z caller=client.go:80 msg="creating client for grafana instance" user=438946 
addr=dns:///afirmo-grafana-http.hosted-grafana.svc.cluster.local.:10000 retry_max=5 retry_backoff=100ms retry_jitter=0.1 + level=debug ts=2024-05-29T13:44:13.498610353Z caller=ruler.go:498 msg="number of tenants owned by this instance" owned=419 total=634 + level=debug ts=2024-05-29T13:44:13.498500897Z caller=remote_instance_store.go:51 user=407477 slug=inventa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=astra-bud-hu-008" t=2024-05-29T13:44:13.498488127Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=astra-bud-hu-007" t=2024-05-29T13:44:13.498326084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=astra-bud-hu-006" t=2024-05-29T13:44:13.498206822Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=148654 slug=tinybeans instance= t=2024-05-29T13:44:13.498177077Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.498082829Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=angani-ke-04" t=2024-05-29T13:44:13.498041762Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.498001864Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=430961 slug=solifi instance="Instance=--" t=2024-05-29T13:44:13.497976708Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.497946488Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.497929474Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=430961 slug=solifi instance="Instance=--" t=2024-05-29T13:44:13.4979534Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:13.49793346Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.497919512Z caller=remote_instance_store.go:51 user=482906 slug=wavelo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=angani-ke-03" t=2024-05-29T13:44:13.497911526Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=310637 slug=notino t=2024-05-29T13:44:13.497856Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=36.8109ms + level=debug ts=2024-05-29T13:44:13.497769605Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=177453 slug=clabs instance="datasource_uid=7UodHjDnz, ref_id=A" t=2024-05-29T13:44:13.497782605Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=177453 slug=clabs instance="datasource_uid=7UodHjDnz, ref_id=A" t=2024-05-29T13:44:13.49772336Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=177453 slug=clabs version=63 fingerprint=722604cde8964429 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.497616256Z 
level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=7UodHjDnz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.497343721s EvaluationString:}]" duration=83.429018ms + level=debug ts=2024-05-29T13:44:13.497574866Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-us-slc-09" t=2024-05-29T13:44:13.497263726Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.497111611Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-us-slc-07" t=2024-05-29T13:44:13.497049745Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-us-slc-06" t=2024-05-29T13:44:13.496937687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-us-slc-05" t=2024-05-29T13:44:13.496824078Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.496663205Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=147806 slug=adevintaengprod instance= t=2024-05-29T13:44:13.496682785Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.496739715Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=f90b96f3f8878005 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.496547359Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.496268811s EvaluationString:}]" duration=357.271616ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-us-slc-03" t=2024-05-29T13:44:13.496587878Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-us-slc-02" t=2024-05-29T13:44:13.49648659Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-050" t=2024-05-29T13:44:13.496259615Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-050" t=2024-05-29T13:44:13.496249832Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.495924327Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-046" t=2024-05-29T13:44:13.495834718Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-045" t=2024-05-29T13:44:13.495737019Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-044" t=2024-05-29T13:44:13.495644429Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-044" t=2024-05-29T13:44:13.495629715Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-043" t=2024-05-29T13:44:13.495519423Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-042" t=2024-05-29T13:44:13.495416658Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.49513505Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-041" t=2024-05-29T13:44:13.495294793Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.495230436Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-040" t=2024-05-29T13:44:13.495180295Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.495176376Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-039" t=2024-05-29T13:44:13.495083565Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.495009787Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.494934642Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.494787583Z caller=remote_instance_store.go:51 user=304032 slug=clearbanc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.494702187Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.487209ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-036" t=2024-05-29T13:44:13.494718505Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-034" t=2024-05-29T13:44:13.494500163Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-033" t=2024-05-29T13:44:13.494392925Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.494318337Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.494066799Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.493950937Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.494016093Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.494040665Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.494032793Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=430961 
slug=solifi t=2024-05-29T13:44:13.494010783Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.494018268Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=245291 slug=pismo version=26 fingerprint=e31f809487c9022e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.493934047Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.493694398s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=369.827643ms + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=8b37adc1293bd401 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.493873282Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.493438493s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=70.641559ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-029" t=2024-05-29T13:44:13.493835287Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.493673888Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-028" t=2024-05-29T13:44:13.493734155Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.493674167Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-028" t=2024-05-29T13:44:13.493725829Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-027" t=2024-05-29T13:44:13.493652793Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-025" t=2024-05-29T13:44:13.49338812Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-025" t=2024-05-29T13:44:13.493376217Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.493096387Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.492929749Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-022" t=2024-05-29T13:44:13.492987453Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.492820941Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-021" t=2024-05-29T13:44:13.492894544Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-020" t=2024-05-29T13:44:13.492824995Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.492564684Z caller=remote_instance_store.go:51 user=80938 
slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-018" t=2024-05-29T13:44:13.492612183Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-018" t=2024-05-29T13:44:13.492604438Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-017" t=2024-05-29T13:44:13.492529532Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.492448255Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=450735 slug=oplogtech t=2024-05-29T13:44:13.492345356Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.492298472Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-014" t=2024-05-29T13:44:13.492116772Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-013" t=2024-05-29T13:44:13.491987878Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.491807269Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic instance="FunctionName=approval-request-consumer-live" t=2024-05-29T13:44:13.491798408Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.491869291Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic instance="FunctionName=approval-request-consumer-live" t=2024-05-29T13:44:13.491783938Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.491699406Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=432323 slug=lithic version=1 fingerprint=34cd3716e8f711f4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.491657414Z level=debug msg="Alert rule evaluated" results="[{Instance:FunctionName=approval-request-consumer-live State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:FunctionName=approval-request-consumer-live Value:0xc01a7770b0} C:{Var:C Labels:FunctionName=approval-request-consumer-live Value:0xc01a7770b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.491332592s EvaluationString:[ var='B' labels={FunctionName=approval-request-consumer-live} value=0 ], [ var='C' labels={FunctionName=approval-request-consumer-live} value=0 ]}]" duration=89.089896ms + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.491699695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.491605015Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.491680383Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:13.491664413Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance= t=2024-05-29T13:44:13.491650553Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-010" t=2024-05-29T13:44:13.491588837Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.491591025Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:13.491590962Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=746993 slug=pegacloud version=6 fingerprint=7fc4bf74cebb3e57 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.491431282Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=f256a68f-9383-46b8-b2a3-2b84e473b3da, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.490355512s EvaluationString:}]" duration=30.820201ms + logger=ngalert.state.manager.persist user=174675 slug=journalprod t=2024-05-29T13:44:13.491393649Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=89.731576ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-008" t=2024-05-29T13:44:13.491315227Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.491282341Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.491235646Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.491219805Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-007" t=2024-05-29T13:44:13.491193107Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-007" t=2024-05-29T13:44:13.491181873Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-006" t=2024-05-29T13:44:13.491076445Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-006" t=2024-05-29T13:44:13.491058768Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:13.491015575Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.490738034Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-uk-we-003" t=2024-05-29T13:44:13.490766968Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.490713273Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.490510367Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.490557843Z caller=remote_instance_store.go:51 
user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=770817 slug=exproment t=2024-05-29T13:44:13.490248717Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.962937ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-si-001" t=2024-05-29T13:44:13.490264583Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.489309195Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=806229 slug=simplisafe instance="env=prd-west2, region=us-west-2" t=2024-05-29T13:44:13.489234804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=806229 slug=simplisafe instance="env=prd-east, region=us-east-1" t=2024-05-29T13:44:13.489114643Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.490035805Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-mk-2" t=2024-05-29T13:44:13.489855237Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.489800572Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-mk-2" t=2024-05-29T13:44:13.489843187Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.489721472Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.489760369Z caller=remote_instance_store.go:51 user=306551 slug=teckresourcesalerts msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.48967755Z caller=remote_instance_store.go:51 user=777267 slug=digikare msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH503, site=LCO" t=2024-05-29T13:44:13.48957687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=777267 slug=digikare instance="cluster=dgk-v3, instance=aks-dgk129-40792515-vmss000000, job=integrations/kubernetes/kubelet, namespace=dgk-elk, persistentvolumeclaim=elasticsearch-data-elk-1-es-data-hot-1" t=2024-05-29T13:44:13.489618269Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=777267 slug=digikare t=2024-05-29T13:44:13.489552277Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=777267 slug=digikare version=1 fingerprint=a9c04bcf933a9878 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.489469916Z level=debug msg="Alert rule evaluated" results="[{Instance:cluster=dgk-v3, instance=aks-dgk129-40792515-vmss000000, job=integrations/kubernetes/kubelet, namespace=dgk-elk, persistentvolumeclaim=elasticsearch-data-elk-1-es-data-hot-1 State:Normal Error: Results:map[] Values:map[ALERT:{Var:ALERT Labels:cluster=dgk-v3, instance=aks-dgk129-40792515-vmss000000, job=integrations/kubernetes/kubelet, namespace=dgk-elk, persistentvolumeclaim=elasticsearch-data-elk-1-es-data-hot-1 Value:0xc057dcaaf8} PERC_FREE:{Var:PERC_FREE Labels:cluster=dgk-v3, instance=aks-dgk129-40792515-vmss000000, job=integrations/kubernetes/kubelet, namespace=dgk-elk, 
persistentvolumeclaim=elasticsearch-data-elk-1-es-data-hot-1 Value:0xc057dcaab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.489131044s EvaluationString:[ var='ALERT' labels={cluster=dgk-v3, instance=aks-dgk129-40792515-vmss000000, job=integrations/kubernetes/kubelet, namespace=dgk-elk, persistentvolumeclaim=elasticsearch-data-elk-1-es-data-hot-1} value=0 ], [ var='PERC_FREE' labels={cluster=dgk-v3, instance=aks-dgk129-40792515-vmss000000, job=integrations/kubernetes/kubelet, namespace=dgk-elk, persistentvolumeclaim=elasticsearch-data-elk-1-es-data-hot-1} value=0.47012926878909117 ]}]" duration=54.876942ms + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH406, site=LCO" t=2024-05-29T13:44:13.489330406Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=806229 slug=simplisafe t=2024-05-29T13:44:13.488988201Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:13.489269809Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=26.002307ms + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-mc-2" t=2024-05-29T13:44:13.489310757Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-mc-1" t=2024-05-29T13:44:13.489195314Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH326, site=EVO" t=2024-05-29T13:44:13.489157453Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-li-2" t=2024-05-29T13:44:13.489032742Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH325, site=EVO" t=2024-05-29T13:44:13.489030918Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:13.488842757Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.866069ms + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH324, site=EVO" t=2024-05-29T13:44:13.488914805Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH310, site=FRO" t=2024-05-29T13:44:13.488827696Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH305, site=FRO" t=2024-05-29T13:44:13.488730777Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.488648619Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-je-1" t=2024-05-29T13:44:13.488634698Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-im-2" t=2024-05-29T13:44:13.488508973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH140, site=GHO" t=2024-05-29T13:44:13.48847791Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH140, site=GHO" t=2024-05-29T13:44:13.488466116Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="equipmentName=SH130, site=GHO" t=2024-05-29T13:44:13.488401971Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=199295 slug=flexpoolio t=2024-05-29T13:44:13.488401848Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=199295 slug=flexpoolio version=1 fingerprint=f7ccbe31c30ff073 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.488348611Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.488091346s EvaluationString:}]" duration=13.265981ms + logger=ngalert.state.manager user=306551 slug=teckresourcesalerts t=2024-05-29T13:44:13.488272908Z level=debug msg="State manager processing evaluation results" resultCount=16 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-by-3" t=2024-05-29T13:44:13.488092525Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.487934828Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.487608816Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=787184 slug=brownlabatyale t=2024-05-29T13:44:13.487571974Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.482624ms + level=debug ts=2024-05-29T13:44:13.487502926Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.487502291Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=197492 slug=nbi instance= t=2024-05-29T13:44:13.487456776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=197492 slug=nbi t=2024-05-29T13:44:13.487402193Z level=warn msg="Rule declares one or many reserved labels. 
Those rules labels will be ignored" labels="alertname=Node Status" + level=debug ts=2024-05-29T13:44:13.48741646Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.487395337Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-am-1" t=2024-05-29T13:44:13.487334857Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=166137 slug=teletracking t=2024-05-29T13:44:13.487358782Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-am-1" t=2024-05-29T13:44:13.487322721Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.487297663Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.487255437Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.487212877Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.487173531Z caller=remote_instance_store.go:51 user=740833 slug=prospectprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=740833 slug=prospectprod instance= t=2024-05-29T13:44:13.48710335Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=740833 slug=prospectprod t=2024-05-29T13:44:13.48706119Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=260796 slug=expressvpn instance="host=100tb-ams-ad-1" t=2024-05-29T13:44:13.487164347Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166137 slug=teletracking instance="Series=query2e0b1de6cb534cfa86b100e40401a46c" t=2024-05-29T13:44:13.487158623Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.487111598Z caller=remote_instance_store.go:51 user=64313 slug=supplypike msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.487102798Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.012079ms + level=debug ts=2024-05-29T13:44:13.487020842Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.486909076Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=386776 slug=rcsworks t=2024-05-29T13:44:13.486822374Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=19.318791ms + logger=ngalert.scheduler user=260796 slug=expressvpn version=128 fingerprint=894e3b84b8f9e1ca attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.471455807Z level=debug msg="Alert rule evaluated" results="[{Instance:host=100tb-ams-ad-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-ad-1 Value:0xc02e3dfab8} B:{Var:B Labels:host=100tb-ams-ad-1 Value:0xc02e3dfa68} C:{Var:C Labels:host=100tb-ams-ad-1 Value:0xc02e3dfa98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429695784s EvaluationString:[ var='A' 
labels={host=100tb-ams-ad-1} value=-0.12116739566140447 ], [ var='B' labels={host=100tb-ams-ad-1} value=-0.12116739566140447 ], [ var='C' labels={host=100tb-ams-ad-1} value=0 ]} {Instance:host=100tb-ams-am-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-am-1 Value:0xc02e3dfb10} B:{Var:B Labels:host=100tb-ams-am-1 Value:0xc02e3dfb30} C:{Var:C Labels:host=100tb-ams-am-1 Value:0xc02e3dfb50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429723637s EvaluationString:[ var='A' labels={host=100tb-ams-am-1} value=0.0896600139551739 ], [ var='B' labels={host=100tb-ams-am-1} value=0.0896600139551739 ], [ var='C' labels={host=100tb-ams-am-1} value=0 ]} {Instance:host=100tb-ams-am-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-am-2 Value:0xc02e3dfba0} B:{Var:B Labels:host=100tb-ams-am-2 Value:0xc02e3dfbd0} C:{Var:C Labels:host=100tb-ams-am-2 Value:0xc02e3dfbf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429732927s EvaluationString:[ var='A' labels={host=100tb-ams-am-2} value=-0.1577880943248644 ], [ var='B' labels={host=100tb-ams-am-2} value=-0.1577880943248644 ], [ var='C' labels={host=100tb-ams-am-2} value=0 ]} {Instance:host=100tb-ams-ba-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-ba-1 Value:0xc02e3dfc30} B:{Var:B Labels:host=100tb-ams-ba-1 Value:0xc02e3dfc50} C:{Var:C Labels:host=100tb-ams-ba-1 Value:0xc02e3dfc70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429742419s EvaluationString:[ var='A' labels={host=100tb-ams-ba-1} value=0.004722103306455949 ], [ var='B' labels={host=100tb-ams-ba-1} value=0.004722103306455949 ], [ var='C' labels={host=100tb-ams-ba-1} value=0 ]} {Instance:host=100tb-ams-ba-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-ba-2 Value:0xc02e3dfcb0} B:{Var:B Labels:host=100tb-ams-ba-2 Value:0xc02e3dfcd0} C:{Var:C Labels:host=100tb-ams-ba-2 Value:0xc02e3dfd00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429750592s EvaluationString:[ var='A' labels={host=100tb-ams-ba-2} value=0.012997219274257787 ], [ var='B' labels={host=100tb-ams-ba-2} value=0.012997219274257787 ], [ var='C' labels={host=100tb-ams-ba-2} value=0 ]} {Instance:host=100tb-ams-by-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-by-1 Value:0xc02e3dfd70} B:{Var:B Labels:host=100tb-ams-by-1 Value:0xc02e3dfd90} C:{Var:C Labels:host=100tb-ams-by-1 Value:0xc02e3dfd50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429760149s EvaluationString:[ var='A' labels={host=100tb-ams-by-1} value=0.47403824160326735 ], [ var='B' labels={host=100tb-ams-by-1} value=0.47403824160326735 ], [ var='C' labels={host=100tb-ams-by-1} value=0 ]} {Instance:host=100tb-ams-by-3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-by-3 Value:0xc02e3dfdd0} B:{Var:B Labels:host=100tb-ams-by-3 Value:0xc02e3dfdf0} C:{Var:C Labels:host=100tb-ams-by-3 Value:0xc02e3dfe30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429768589s EvaluationString:[ var='A' labels={host=100tb-ams-by-3} value=0.05907713221027993 ], [ var='B' labels={host=100tb-ams-by-3} value=0.05907713221027993 ], [ var='C' labels={host=100tb-ams-by-3} value=0 ]} {Instance:host=100tb-ams-by-4 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-by-4 Value:0xc02e3dfe70} B:{Var:B Labels:host=100tb-ams-by-4 Value:0xc02e3dfe90} C:{Var:C Labels:host=100tb-ams-by-4 Value:0xc02e3dfeb0}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429775954s EvaluationString:[ var='A' labels={host=100tb-ams-by-4} value=-0.02895816785809302 ], [ var='B' labels={host=100tb-ams-by-4} value=-0.02895816785809302 ], [ var='C' labels={host=100tb-ams-by-4} value=0 ]} {Instance:host=100tb-ams-im-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-im-1 Value:0xc02e3dff30} B:{Var:B Labels:host=100tb-ams-im-1 Value:0xc02e3dfef0} C:{Var:C Labels:host=100tb-ams-im-1 Value:0xc02e3dff10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429784406s EvaluationString:[ var='A' labels={host=100tb-ams-im-1} value=0.031414869198395465 ], [ var='B' labels={host=100tb-ams-im-1} value=0.031414869198395465 ], [ var='C' labels={host=100tb-ams-im-1} value=0 ]} {Instance:host=100tb-ams-im-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-im-2 Value:0xc02e3dffc0} B:{Var:B Labels:host=100tb-ams-im-2 Value:0xc02e3dff70} C:{Var:C Labels:host=100tb-ams-im-2 Value:0xc02e3dffa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429791449s EvaluationString:[ var='A' labels={host=100tb-ams-im-2} value=0.30827928196266896 ], [ var='B' labels={host=100tb-ams-im-2} value=0.30827928196266896 ], [ var='C' labels={host=100tb-ams-im-2} value=0 ]} {Instance:host=100tb-ams-je-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-je-1 Value:0xc02e18c000} B:{Var:B Labels:host=100tb-ams-je-1 Value:0xc02e18c020} C:{Var:C Labels:host=100tb-ams-je-1 Value:0xc02e18c050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429801898s EvaluationString:[ var='A' labels={host=100tb-ams-je-1} value=0.03976417008701105 ], [ var='B' labels={host=100tb-ams-je-1} value=0.03976417008701105 ], [ var='C' labels={host=100tb-ams-je-1} value=0 ]} {Instance:host=100tb-ams-je-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-je-2 Value:0xc02e18c0b0} B:{Var:B Labels:host=100tb-ams-je-2 Value:0xc02e18c0d0} C:{Var:C Labels:host=100tb-ams-je-2 Value:0xc02e18c100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429810208s EvaluationString:[ var='A' labels={host=100tb-ams-je-2} value=-0.0366654754128572 ], [ var='B' labels={host=100tb-ams-je-2} value=-0.0366654754128572 ], [ var='C' labels={host=100tb-ams-je-2} value=0 ]} {Instance:host=100tb-ams-li-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-li-1 Value:0xc02e18c190} B:{Var:B Labels:host=100tb-ams-li-1 Value:0xc02e18c150} C:{Var:C Labels:host=100tb-ams-li-1 Value:0xc02e18c170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429818677s EvaluationString:[ var='A' labels={host=100tb-ams-li-1} value=-0.13957969003809487 ], [ var='B' labels={host=100tb-ams-li-1} value=-0.13957969003809487 ], [ var='C' labels={host=100tb-ams-li-1} value=0 ]} {Instance:host=100tb-ams-li-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-li-2 Value:0xc02e18c1f0} B:{Var:B Labels:host=100tb-ams-li-2 Value:0xc02e18c210} C:{Var:C Labels:host=100tb-ams-li-2 Value:0xc02e18c250}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429826211s EvaluationString:[ var='A' labels={host=100tb-ams-li-2} value=-0.1185082661882575 ], [ var='B' labels={host=100tb-ams-li-2} value=-0.1185082661882575 ], [ var='C' labels={host=100tb-ams-li-2} value=0 ]} {Instance:host=100tb-ams-mc-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-mc-1 Value:0xc02e18c2e0} B:{Var:B 
Labels:host=100tb-ams-mc-1 Value:0xc02e18c2a0} C:{Var:C Labels:host=100tb-ams-mc-1 Value:0xc02e18c2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429846036s EvaluationString:[ var='A' labels={host=100tb-ams-mc-1} value=-0.09404691761199047 ], [ var='B' labels={host=100tb-ams-mc-1} value=-0.09404691761199047 ], [ var='C' labels={host=100tb-ams-mc-1} value=0 ]} {Instance:host=100tb-ams-mc-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-mc-2 Value:0xc02e18c3a0} B:{Var:B Labels:host=100tb-ams-mc-2 Value:0xc02e18c350} C:{Var:C Labels:host=100tb-ams-mc-2 Value:0xc02e18c370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429852795s EvaluationString:[ var='A' labels={host=100tb-ams-mc-2} value=0.21148371384877052 ], [ var='B' labels={host=100tb-ams-mc-2} value=0.21148371384877052 ], [ var='C' labels={host=100tb-ams-mc-2} value=0 ]} {Instance:host=100tb-ams-me-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-me-2 Value:0xc02e18c3f0} B:{Var:B Labels:host=100tb-ams-me-2 Value:0xc02e18c410} C:{Var:C Labels:host=100tb-ams-me-2 Value:0xc02e18c430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429861371s EvaluationString:[ var='A' labels={host=100tb-ams-me-2} value=0.016110100658835336 ], [ var='B' labels={host=100tb-ams-me-2} value=0.016110100658835336 ], [ var='C' labels={host=100tb-ams-me-2} value=0 ]} {Instance:host=100tb-ams-me-3 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-me-3 Value:0xc02e18c4f0} B:{Var:B Labels:host=100tb-ams-me-3 Value:0xc02e18c490} C:{Var:C Labels:host=100tb-ams-me-3 Value:0xc02e18c4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429870911s EvaluationString:[ var='A' labels={host=100tb-ams-me-3} value=-0.14688374949104244 ], [ var='B' labels={host=100tb-ams-me-3} value=-0.14688374949104244 ], [ var='C' labels={host=100tb-ams-me-3} value=0 ]} {Instance:host=100tb-ams-mk-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-mk-1 Value:0xc02e18c550} B:{Var:B Labels:host=100tb-ams-mk-1 Value:0xc02e18c570} C:{Var:C Labels:host=100tb-ams-mk-1 Value:0xc02e18c5a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429879096s EvaluationString:[ var='A' labels={host=100tb-ams-mk-1} value=0.031631882663418764 ], [ var='B' labels={host=100tb-ams-mk-1} value=0.031631882663418764 ], [ var='C' labels={host=100tb-ams-mk-1} value=0 ]} {Instance:host=100tb-ams-mk-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-mk-2 Value:0xc02e18c5f0} B:{Var:B Labels:host=100tb-ams-mk-2 Value:0xc02e18c630} C:{Var:C Labels:host=100tb-ams-mk-2 Value:0xc02e18c650}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429889695s EvaluationString:[ var='A' labels={host=100tb-ams-mk-2} value=-0.23275806164103113 ], [ var='B' labels={host=100tb-ams-mk-2} value=-0.23275806164103113 ], [ var='C' labels={host=100tb-ams-mk-2} value=0 ]} {Instance:host=100tb-ams-mt-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-mt-1 Value:0xc02e18c6a0} B:{Var:B Labels:host=100tb-ams-mt-1 Value:0xc02e18c6d0} C:{Var:C Labels:host=100tb-ams-mt-1 Value:0xc02e18c700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429896936s EvaluationString:[ var='A' labels={host=100tb-ams-mt-1} value=-0.17393636539347312 ], [ var='B' labels={host=100tb-ams-mt-1} value=-0.17393636539347312 ], [ var='C' labels={host=100tb-ams-mt-1} value=0 ]} {Instance:host=100tb-ams-mt-3 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-mt-3 Value:0xc02e18c7a0} B:{Var:B Labels:host=100tb-ams-mt-3 Value:0xc02e18c740} C:{Var:C Labels:host=100tb-ams-mt-3 Value:0xc02e18c760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429904641s EvaluationString:[ var='A' labels={host=100tb-ams-mt-3} value=-0.02086771744186779 ], [ var='B' labels={host=100tb-ams-mt-3} value=-0.02086771744186779 ], [ var='C' labels={host=100tb-ams-mt-3} value=0 ]} {Instance:host=100tb-ams-si-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-si-001 Value:0xc02e18c7e0} B:{Var:B Labels:host=100tb-ams-si-001 Value:0xc02e18c820} C:{Var:C Labels:host=100tb-ams-si-001 Value:0xc02e18c840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.42991234s EvaluationString:[ var='A' labels={host=100tb-ams-si-001} value=0.11822603953280719 ], [ var='B' labels={host=100tb-ams-si-001} value=0.11822603953280719 ], [ var='C' labels={host=100tb-ams-si-001} value=0 ]} {Instance:host=100tb-ams-si-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-si-002 Value:0xc02e18c8b0} B:{Var:B Labels:host=100tb-ams-si-002 Value:0xc02e18c8d0} C:{Var:C Labels:host=100tb-ams-si-002 Value:0xc02e18c880}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429920116s EvaluationString:[ var='A' labels={host=100tb-ams-si-002} value=-0.03949502441926711 ], [ var='B' labels={host=100tb-ams-si-002} value=-0.03949502441926711 ], [ var='C' labels={host=100tb-ams-si-002} value=0 ]} {Instance:host=100tb-ams-uz-01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-uz-01 Value:0xc02e18c920} B:{Var:B Labels:host=100tb-ams-uz-01 Value:0xc02e18c950} C:{Var:C Labels:host=100tb-ams-uz-01 Value:0xc02e18c970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429927819s EvaluationString:[ var='A' labels={host=100tb-ams-uz-01} value=0.12969737617375188 ], [ var='B' labels={host=100tb-ams-uz-01} value=0.12969737617375188 ], [ var='C' labels={host=100tb-ams-uz-01} value=0 ]} {Instance:host=100tb-ams-uz-02 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-ams-uz-02 Value:0xc02e18c9e0} B:{Var:B Labels:host=100tb-ams-uz-02 Value:0xc02e18ca00} C:{Var:C Labels:host=100tb-ams-uz-02 Value:0xc02e18c9b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429937859s EvaluationString:[ var='A' labels={host=100tb-ams-uz-02} value=-0.2443412252515133 ], [ var='B' labels={host=100tb-ams-uz-02} value=-0.2443412252515133 ], [ var='C' labels={host=100tb-ams-uz-02} value=0 ]} {Instance:host=100tb-uk-we-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-001 Value:0xc02e18ca90} B:{Var:B Labels:host=100tb-uk-we-001 Value:0xc02e18cab0} C:{Var:C Labels:host=100tb-uk-we-001 Value:0xc02e18ca70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429945648s EvaluationString:[ var='A' labels={host=100tb-uk-we-001} value=0.18548123984452936 ], [ var='B' labels={host=100tb-uk-we-001} value=0.18548123984452936 ], [ var='C' labels={host=100tb-uk-we-001} value=0 ]} {Instance:host=100tb-uk-we-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-002 Value:0xc02e18cb10} B:{Var:B Labels:host=100tb-uk-we-002 Value:0xc02e18cb40} C:{Var:C Labels:host=100tb-uk-we-002 Value:0xc02e18cb60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429954084s EvaluationString:[ var='A' labels={host=100tb-uk-we-002} value=-0.2862041449573975 
], [ var='B' labels={host=100tb-uk-we-002} value=-0.2862041449573975 ], [ var='C' labels={host=100tb-uk-we-002} value=0 ]} {Instance:host=100tb-uk-we-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-003 Value:0xc02e18cc40} B:{Var:B Labels:host=100tb-uk-we-003 Value:0xc02e18cc80} C:{Var:C Labels:host=100tb-uk-we-003 Value:0xc02e18cca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429961979s EvaluationString:[ var='A' labels={host=100tb-uk-we-003} value=-0.022545655746749764 ], [ var='B' labels={host=100tb-uk-we-003} value=-0.022545655746749764 ], [ var='C' labels={host=100tb-uk-we-003} value=0 ]} {Instance:host=100tb-uk-we-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-004 Value:0xc02e18cd50} B:{Var:B Labels:host=100tb-uk-we-004 Value:0xc02e18ccf0} C:{Var:C Labels:host=100tb-uk-we-004 Value:0xc02e18cd20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.42996968s EvaluationString:[ var='A' labels={host=100tb-uk-we-004} value=0.00716239141876177 ], [ var='B' labels={host=100tb-uk-we-004} value=0.00716239141876177 ], [ var='C' labels={host=100tb-uk-we-004} value=0 ]} {Instance:host=100tb-uk-we-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-005 Value:0xc02e18cd90} B:{Var:B Labels:host=100tb-uk-we-005 Value:0xc02e18cdc0} C:{Var:C Labels:host=100tb-uk-we-005 Value:0xc02e18cdf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.42997796s EvaluationString:[ var='A' labels={host=100tb-uk-we-005} value=-0.04664816135238503 ], [ var='B' labels={host=100tb-uk-we-005} value=-0.04664816135238503 ], [ var='C' labels={host=100tb-uk-we-005} value=0 ]} {Instance:host=100tb-uk-we-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-006 Value:0xc02e18ce50} B:{Var:B Labels:host=100tb-uk-we-006 Value:0xc02e18ce70} C:{Var:C Labels:host=100tb-uk-we-006 Value:0xc02e18cea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429985786s EvaluationString:[ var='A' labels={host=100tb-uk-we-006} value=-0.06347360671111346 ], [ var='B' labels={host=100tb-uk-we-006} value=-0.06347360671111346 ], [ var='C' labels={host=100tb-uk-we-006} value=0 ]} {Instance:host=100tb-uk-we-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-007 Value:0xc02e18cee0} B:{Var:B Labels:host=100tb-uk-we-007 Value:0xc02e18cf30} C:{Var:C Labels:host=100tb-uk-we-007 Value:0xc02e18cf60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.429993846s EvaluationString:[ var='A' labels={host=100tb-uk-we-007} value=0.19572057815152927 ], [ var='B' labels={host=100tb-uk-we-007} value=0.19572057815152927 ], [ var='C' labels={host=100tb-uk-we-007} value=0 ]} {Instance:host=100tb-uk-we-008 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-008 Value:0xc02e18d000} B:{Var:B Labels:host=100tb-uk-we-008 Value:0xc02e18d020} C:{Var:C Labels:host=100tb-uk-we-008 Value:0xc02e18d040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.430002301s EvaluationString:[ var='A' labels={host=100tb-uk-we-008} value=0.04575288502502772 ], [ var='B' labels={host=100tb-uk-we-008} value=0.04575288502502772 ], [ var='C' labels={host=100tb-uk-we-008} value=0 ]} {Instance:host=100tb-uk-we-009 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-009 Value:0xc02e18d100} B:{Var:B Labels:host=100tb-uk-we-009 Value:0xc02e18d090} C:{Var:C Labels:host=100tb-uk-we-009 Value:0xc02e18d0c0}] 
[Truncated log excerpt: Grafana alert-rule evaluation state, one entry per host instance; the excerpt is cut off mid-entry at both ends. Every entry reports `State:Normal` with an empty `Error`, shares `EvaluatedAt:2024-05-29 13:44:10 +0000 UTC` and an `EvaluationDuration` of roughly 3.43 s, and carries three query variables: `A` and `B` hold the same per-host value (ranging roughly from -0.55 to +0.87 across the excerpt), while `C` is always 0. The hosts span the `100tb-uk-we`, `100tb-us-slc`, `angani-ke`, `astra-bud-hu`, `clouv-ams-nl`, `clouv-atl-us`, `clouv-ewr-us`, `clouv-fra-de`, `clouv-lax-us`, `clouv-lgw-in`, and `clouv-lgw-uk` fleets. A representative entry, verbatim:

{Instance:host=100tb-uk-we-010 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=100tb-uk-we-010 Value:0xc02e18d140} B:{Var:B Labels:host=100tb-uk-we-010 Value:0xc02e18d160} C:{Var:C Labels:host=100tb-uk-we-010 Value:0xc02e18d1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.430017499s EvaluationString:[ var='A' labels={host=100tb-uk-we-010} value=-0.08370530431066965 ], [ var='B' labels={host=100tb-uk-we-010} value=-0.08370530431066965 ], [ var='C' labels={host=100tb-uk-we-010} value=0 ]}]
Labels:host=clouv-lgw-uk-015 Value:0xc052961380} B:{Var:B Labels:host=clouv-lgw-uk-015 Value:0xc0529613a0} C:{Var:C Labels:host=clouv-lgw-uk-015 Value:0xc0529613d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431255509s EvaluationString:[ var='A' labels={host=clouv-lgw-uk-015} value=-0.4831221657471178 ], [ var='B' labels={host=clouv-lgw-uk-015} value=-0.4831221657471178 ], [ var='C' labels={host=clouv-lgw-uk-015} value=0 ]} {Instance:host=clouv-lgw-uk-016 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-lgw-uk-016 Value:0xc052961410} B:{Var:B Labels:host=clouv-lgw-uk-016 Value:0xc052961430} C:{Var:C Labels:host=clouv-lgw-uk-016 Value:0xc052961450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431263667s EvaluationString:[ var='A' labels={host=clouv-lgw-uk-016} value=0.73627030869032 ], [ var='B' labels={host=clouv-lgw-uk-016} value=0.73627030869032 ], [ var='C' labels={host=clouv-lgw-uk-016} value=0 ]} {Instance:host=clouv-lgw-uk-017 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-lgw-uk-017 Value:0xc0529614a0} B:{Var:B Labels:host=clouv-lgw-uk-017 Value:0xc0529614c0} C:{Var:C Labels:host=clouv-lgw-uk-017 Value:0xc0529614e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431271546s EvaluationString:[ var='A' labels={host=clouv-lgw-uk-017} value=-0.1285269461545673 ], [ var='B' labels={host=clouv-lgw-uk-017} value=-0.1285269461545673 ], [ var='C' labels={host=clouv-lgw-uk-017} value=0 ]} {Instance:host=clouv-lgw-uk-018 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-lgw-uk-018 Value:0xc052961560} B:{Var:B Labels:host=clouv-lgw-uk-018 Value:0xc052961580} C:{Var:C Labels:host=clouv-lgw-uk-018 Value:0xc052961530}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4312793s EvaluationString:[ var='A' labels={host=clouv-lgw-uk-018} value=-0.08887801757959816 ], [ var='B' labels={host=clouv-lgw-uk-018} value=-0.08887801757959816 ], [ var='C' labels={host=clouv-lgw-uk-018} value=0 ]} {Instance:host=clouv-lgw-uk-019 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-lgw-uk-019 Value:0xc0529615c0} B:{Var:B Labels:host=clouv-lgw-uk-019 Value:0xc0529615e0} C:{Var:C Labels:host=clouv-lgw-uk-019 Value:0xc052961600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43128842s EvaluationString:[ var='A' labels={host=clouv-lgw-uk-019} value=-0.3576432568356028 ], [ var='B' labels={host=clouv-lgw-uk-019} value=-0.3576432568356028 ], [ var='C' labels={host=clouv-lgw-uk-019} value=0 ]} {Instance:host=clouv-lgw-uk-020 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-lgw-uk-020 Value:0xc052961650} B:{Var:B Labels:host=clouv-lgw-uk-020 Value:0xc052961670} C:{Var:C Labels:host=clouv-lgw-uk-020 Value:0xc052961690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431296745s EvaluationString:[ var='A' labels={host=clouv-lgw-uk-020} value=0.3993392119565679 ], [ var='B' labels={host=clouv-lgw-uk-020} value=0.3993392119565679 ], [ var='C' labels={host=clouv-lgw-uk-020} value=0 ]} {Instance:host=clouv-phx-us-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-phx-us-001 Value:0xc0529616d0} B:{Var:B Labels:host=clouv-phx-us-001 Value:0xc052961700} C:{Var:C Labels:host=clouv-phx-us-001 Value:0xc052961720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431304244s EvaluationString:[ var='A' labels={host=clouv-phx-us-001} value=0.02581260088222237 ], [ var='B' 
labels={host=clouv-phx-us-001} value=0.02581260088222237 ], [ var='C' labels={host=clouv-phx-us-001} value=0 ]} {Instance:host=clouv-phx-us-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-phx-us-002 Value:0xc052961760} B:{Var:B Labels:host=clouv-phx-us-002 Value:0xc052961780} C:{Var:C Labels:host=clouv-phx-us-002 Value:0xc0529617a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43131121s EvaluationString:[ var='A' labels={host=clouv-phx-us-002} value=0.3754753377393984 ], [ var='B' labels={host=clouv-phx-us-002} value=0.3754753377393984 ], [ var='C' labels={host=clouv-phx-us-002} value=0 ]} {Instance:host=clouv-phx-us-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-phx-us-003 Value:0xc052961830} B:{Var:B Labels:host=clouv-phx-us-003 Value:0xc0529617f0} C:{Var:C Labels:host=clouv-phx-us-003 Value:0xc052961810}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431319064s EvaluationString:[ var='A' labels={host=clouv-phx-us-003} value=-0.06702101861589327 ], [ var='B' labels={host=clouv-phx-us-003} value=-0.06702101861589327 ], [ var='C' labels={host=clouv-phx-us-003} value=0 ]} {Instance:host=clouv-phx-us-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-phx-us-004 Value:0xc052961890} B:{Var:B Labels:host=clouv-phx-us-004 Value:0xc0529618d0} C:{Var:C Labels:host=clouv-phx-us-004 Value:0xc052961870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4313274s EvaluationString:[ var='A' labels={host=clouv-phx-us-004} value=0.0652126628132758 ], [ var='B' labels={host=clouv-phx-us-004} value=0.0652126628132758 ], [ var='C' labels={host=clouv-phx-us-004} value=0 ]} {Instance:host=clouv-phx-us-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-phx-us-005 Value:0xc052961990} B:{Var:B Labels:host=clouv-phx-us-005 Value:0xc052961940} C:{Var:C Labels:host=clouv-phx-us-005 Value:0xc052961970}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431335372s EvaluationString:[ var='A' labels={host=clouv-phx-us-005} value=0.07978004842965644 ], [ var='B' labels={host=clouv-phx-us-005} value=0.07978004842965644 ], [ var='C' labels={host=clouv-phx-us-005} value=0 ]} {Instance:host=clouv-phx-us-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=clouv-phx-us-006 Value:0xc052961ad0} B:{Var:B Labels:host=clouv-phx-us-006 Value:0xc052961af0} C:{Var:C Labels:host=clouv-phx-us-006 Value:0xc0529619e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431343071s EvaluationString:[ var='A' labels={host=clouv-phx-us-006} value=0.04159752736275177 ], [ var='B' labels={host=clouv-phx-us-006} value=0.04159752736275177 ], [ var='C' labels={host=clouv-phx-us-006} value=0 ]} {Instance:host=dp-ams-nl-01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ams-nl-01 Value:0xc052961b90} B:{Var:B Labels:host=dp-ams-nl-01 Value:0xc052961b40} C:{Var:C Labels:host=dp-ams-nl-01 Value:0xc052961b70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431350693s EvaluationString:[ var='A' labels={host=dp-ams-nl-01} value=0.08032473680949742 ], [ var='B' labels={host=dp-ams-nl-01} value=0.08032473680949742 ], [ var='C' labels={host=dp-ams-nl-01} value=0 ]} {Instance:host=dp-ams-nl-02 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ams-nl-02 Value:0xc052961c20} B:{Var:B Labels:host=dp-ams-nl-02 Value:0xc052961c50} C:{Var:C Labels:host=dp-ams-nl-02 Value:0xc052961c90}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:3.431358281s EvaluationString:[ var='A' labels={host=dp-ams-nl-02} value=-0.05450921669015992 ], [ var='B' labels={host=dp-ams-nl-02} value=-0.05450921669015992 ], [ var='C' labels={host=dp-ams-nl-02} value=0 ]} {Instance:host=dp-ams-nl-03 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ams-nl-03 Value:0xc052961ce0} B:{Var:B Labels:host=dp-ams-nl-03 Value:0xc052961d00} C:{Var:C Labels:host=dp-ams-nl-03 Value:0xc052961d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431367863s EvaluationString:[ var='A' labels={host=dp-ams-nl-03} value=-0.005921903561603159 ], [ var='B' labels={host=dp-ams-nl-03} value=-0.005921903561603159 ], [ var='C' labels={host=dp-ams-nl-03} value=0 ]} {Instance:host=dp-ams-nl-04 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ams-nl-04 Value:0xc052961d70} B:{Var:B Labels:host=dp-ams-nl-04 Value:0xc052961d90} C:{Var:C Labels:host=dp-ams-nl-04 Value:0xc052961db0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431375462s EvaluationString:[ var='A' labels={host=dp-ams-nl-04} value=0.05287549555963267 ], [ var='B' labels={host=dp-ams-nl-04} value=0.05287549555963267 ], [ var='C' labels={host=dp-ams-nl-04} value=0 ]} {Instance:host=dp-ams-nl-05 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ams-nl-05 Value:0xc052961e20} B:{Var:B Labels:host=dp-ams-nl-05 Value:0xc052961e50} C:{Var:C Labels:host=dp-ams-nl-05 Value:0xc052961df0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431383067s EvaluationString:[ var='A' labels={host=dp-ams-nl-05} value=-0.026261713186628857 ], [ var='B' labels={host=dp-ams-nl-05} value=-0.026261713186628857 ], [ var='C' labels={host=dp-ams-nl-05} value=0 ]} {Instance:host=dp-ams-nl-06 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ams-nl-06 Value:0xc052961e90} B:{Var:B Labels:host=dp-ams-nl-06 Value:0xc052961eb0} C:{Var:C Labels:host=dp-ams-nl-06 Value:0xc052961ed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431391051s EvaluationString:[ var='A' labels={host=dp-ams-nl-06} value=0.06873814422752796 ], [ var='B' labels={host=dp-ams-nl-06} value=0.06873814422752796 ], [ var='C' labels={host=dp-ams-nl-06} value=0 ]} {Instance:host=dp-arn-se-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-arn-se-001 Value:0xc052961f20} B:{Var:B Labels:host=dp-arn-se-001 Value:0xc052961f40} C:{Var:C Labels:host=dp-arn-se-001 Value:0xc052961f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431398453s EvaluationString:[ var='A' labels={host=dp-arn-se-001} value=-0.011190097592622312 ], [ var='B' labels={host=dp-arn-se-001} value=-0.011190097592622312 ], [ var='C' labels={host=dp-arn-se-001} value=0 ]} {Instance:host=dp-arn-se-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-arn-se-002 Value:0xc052961fa0} B:{Var:B Labels:host=dp-arn-se-002 Value:0xc052961fd0} C:{Var:C Labels:host=dp-arn-se-002 Value:0xc052961ff0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431406656s EvaluationString:[ var='A' labels={host=dp-arn-se-002} value=0.0513143227162279 ], [ var='B' labels={host=dp-arn-se-002} value=0.0513143227162279 ], [ var='C' labels={host=dp-arn-se-002} value=0 ]} {Instance:host=dp-arn-se-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-arn-se-003 Value:0xc05207e0a0} B:{Var:B Labels:host=dp-arn-se-003 Value:0xc05207e0d0} C:{Var:C Labels:host=dp-arn-se-003 
Value:0xc05207e0f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431415574s EvaluationString:[ var='A' labels={host=dp-arn-se-003} value=-0.05355948258754613 ], [ var='B' labels={host=dp-arn-se-003} value=-0.05355948258754613 ], [ var='C' labels={host=dp-arn-se-003} value=0 ]} {Instance:host=dp-arn-se-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-arn-se-004 Value:0xc05207e150} B:{Var:B Labels:host=dp-arn-se-004 Value:0xc05207e170} C:{Var:C Labels:host=dp-arn-se-004 Value:0xc05207e190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431423632s EvaluationString:[ var='A' labels={host=dp-arn-se-004} value=-0.2479937854883271 ], [ var='B' labels={host=dp-arn-se-004} value=-0.2479937854883271 ], [ var='C' labels={host=dp-arn-se-004} value=0 ]} {Instance:host=dp-arn-se-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-arn-se-005 Value:0xc05207e1e0} B:{Var:B Labels:host=dp-arn-se-005 Value:0xc05207e200} C:{Var:C Labels:host=dp-arn-se-005 Value:0xc05207e230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431431939s EvaluationString:[ var='A' labels={host=dp-arn-se-005} value=-0.180939185064517 ], [ var='B' labels={host=dp-arn-se-005} value=-0.180939185064517 ], [ var='C' labels={host=dp-arn-se-005} value=0 ]} {Instance:host=dp-arn-se-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-arn-se-006 Value:0xc05207e2e0} B:{Var:B Labels:host=dp-arn-se-006 Value:0xc05207e280} C:{Var:C Labels:host=dp-arn-se-006 Value:0xc05207e2c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431441873s EvaluationString:[ var='A' labels={host=dp-arn-se-006} value=-0.2999187043634189 ], [ var='B' labels={host=dp-arn-se-006} value=-0.2999187043634189 ], [ var='C' labels={host=dp-arn-se-006} value=0 ]} {Instance:host=dp-bru-be-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-bru-be-001 Value:0xc05207e320} B:{Var:B Labels:host=dp-bru-be-001 Value:0xc05207e340} C:{Var:C Labels:host=dp-bru-be-001 Value:0xc05207e360}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431449896s EvaluationString:[ var='A' labels={host=dp-bru-be-001} value=0.06659128000617898 ], [ var='B' labels={host=dp-bru-be-001} value=0.06659128000617898 ], [ var='C' labels={host=dp-bru-be-001} value=0 ]} {Instance:host=dp-bru-be-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-bru-be-002 Value:0xc05207e3d0} B:{Var:B Labels:host=dp-bru-be-002 Value:0xc05207e400} C:{Var:C Labels:host=dp-bru-be-002 Value:0xc05207e3b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431457353s EvaluationString:[ var='A' labels={host=dp-bru-be-002} value=-0.15111869461160649 ], [ var='B' labels={host=dp-bru-be-002} value=-0.15111869461160649 ], [ var='C' labels={host=dp-bru-be-002} value=0 ]} {Instance:host=dp-bru-be-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-bru-be-003 Value:0xc05207e480} B:{Var:B Labels:host=dp-bru-be-003 Value:0xc05207e4b0} C:{Var:C Labels:host=dp-bru-be-003 Value:0xc05207e450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431465248s EvaluationString:[ var='A' labels={host=dp-bru-be-003} value=-0.12321027579170973 ], [ var='B' labels={host=dp-bru-be-003} value=-0.12321027579170973 ], [ var='C' labels={host=dp-bru-be-003} value=0 ]} {Instance:host=dp-cdg-fr-063 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-cdg-fr-063 Value:0xc05207e530} B:{Var:B Labels:host=dp-cdg-fr-063 
Value:0xc05207e570} C:{Var:C Labels:host=dp-cdg-fr-063 Value:0xc05207e500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431473713s EvaluationString:[ var='A' labels={host=dp-cdg-fr-063} value=-0.04028476258000069 ], [ var='B' labels={host=dp-cdg-fr-063} value=-0.04028476258000069 ], [ var='C' labels={host=dp-cdg-fr-063} value=0 ]} {Instance:host=dp-cdg-fr-064 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-cdg-fr-064 Value:0xc05207e630} B:{Var:B Labels:host=dp-cdg-fr-064 Value:0xc05207e5c0} C:{Var:C Labels:host=dp-cdg-fr-064 Value:0xc05207e5e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431491256s EvaluationString:[ var='A' labels={host=dp-cdg-fr-064} value=-0.012574110658671128 ], [ var='B' labels={host=dp-cdg-fr-064} value=-0.012574110658671128 ], [ var='C' labels={host=dp-cdg-fr-064} value=0 ]} {Instance:host=dp-cdg-fr-065 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-cdg-fr-065 Value:0xc05207e6d0} B:{Var:B Labels:host=dp-cdg-fr-065 Value:0xc05207e6f0} C:{Var:C Labels:host=dp-cdg-fr-065 Value:0xc05207e680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431507583s EvaluationString:[ var='A' labels={host=dp-cdg-fr-065} value=-0.18168399842984684 ], [ var='B' labels={host=dp-cdg-fr-065} value=-0.18168399842984684 ], [ var='C' labels={host=dp-cdg-fr-065} value=0 ]} {Instance:host=dp-cdg-fr-066 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-cdg-fr-066 Value:0xc05207e740} B:{Var:B Labels:host=dp-cdg-fr-066 Value:0xc05207e770} C:{Var:C Labels:host=dp-cdg-fr-066 Value:0xc05207e790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43151529s EvaluationString:[ var='A' labels={host=dp-cdg-fr-066} value=-0.4398370518532805 ], [ var='B' labels={host=dp-cdg-fr-066} value=-0.4398370518532805 ], [ var='C' labels={host=dp-cdg-fr-066} value=0 ]} {Instance:host=dp-cdg-fr-067 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-cdg-fr-067 Value:0xc05207e7d0} B:{Var:B Labels:host=dp-cdg-fr-067 Value:0xc05207e7f0} C:{Var:C Labels:host=dp-cdg-fr-067 Value:0xc05207e830}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431523386s EvaluationString:[ var='A' labels={host=dp-cdg-fr-067} value=0.004826068247159773 ], [ var='B' labels={host=dp-cdg-fr-067} value=0.004826068247159773 ], [ var='C' labels={host=dp-cdg-fr-067} value=0 ]} {Instance:host=dp-cdg-fr-068 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-cdg-fr-068 Value:0xc05207e890} B:{Var:B Labels:host=dp-cdg-fr-068 Value:0xc05207e8c0} C:{Var:C Labels:host=dp-cdg-fr-068 Value:0xc05207e8e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431531284s EvaluationString:[ var='A' labels={host=dp-cdg-fr-068} value=0.21865025268161908 ], [ var='B' labels={host=dp-cdg-fr-068} value=0.21865025268161908 ], [ var='C' labels={host=dp-cdg-fr-068} value=0 ]} {Instance:host=dp-cdg-fr-069 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-cdg-fr-069 Value:0xc05207e970} B:{Var:B Labels:host=dp-cdg-fr-069 Value:0xc05207e920} C:{Var:C Labels:host=dp-cdg-fr-069 Value:0xc05207e950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43153939s EvaluationString:[ var='A' labels={host=dp-cdg-fr-069} value=-0.16803795196546883 ], [ var='B' labels={host=dp-cdg-fr-069} value=-0.16803795196546883 ], [ var='C' labels={host=dp-cdg-fr-069} value=0 ]} {Instance:host=dp-dca-us-026 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-026 
Value:0xc05207e9f0} B:{Var:B Labels:host=dp-dca-us-026 Value:0xc05207ea10} C:{Var:C Labels:host=dp-dca-us-026 Value:0xc05207ea30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431547447s EvaluationString:[ var='A' labels={host=dp-dca-us-026} value=0.19612554502404578 ], [ var='B' labels={host=dp-dca-us-026} value=0.19612554502404578 ], [ var='C' labels={host=dp-dca-us-026} value=0 ]} {Instance:host=dp-dca-us-027 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-027 Value:0xc05207eb20} B:{Var:B Labels:host=dp-dca-us-027 Value:0xc05207eaa0} C:{Var:C Labels:host=dp-dca-us-027 Value:0xc05207eaf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431555927s EvaluationString:[ var='A' labels={host=dp-dca-us-027} value=1.3618539704619934 ], [ var='B' labels={host=dp-dca-us-027} value=1.3618539704619934 ], [ var='C' labels={host=dp-dca-us-027} value=0 ]} {Instance:host=dp-dca-us-028 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-028 Value:0xc05207eb70} B:{Var:B Labels:host=dp-dca-us-028 Value:0xc05207eba0} C:{Var:C Labels:host=dp-dca-us-028 Value:0xc05207ebd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431564109s EvaluationString:[ var='A' labels={host=dp-dca-us-028} value=-0.11604449276440616 ], [ var='B' labels={host=dp-dca-us-028} value=-0.11604449276440616 ], [ var='C' labels={host=dp-dca-us-028} value=0 ]} {Instance:host=dp-dca-us-029 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-029 Value:0xc05207ec70} B:{Var:B Labels:host=dp-dca-us-029 Value:0xc05207ec20} C:{Var:C Labels:host=dp-dca-us-029 Value:0xc05207ec40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431571566s EvaluationString:[ var='A' labels={host=dp-dca-us-029} value=-0.3040731700310825 ], [ var='B' labels={host=dp-dca-us-029} value=-0.3040731700310825 ], [ var='C' labels={host=dp-dca-us-029} value=0 ]} {Instance:host=dp-dca-us-030 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-030 Value:0xc05207ecf0} B:{Var:B Labels:host=dp-dca-us-030 Value:0xc05207ed20} C:{Var:C Labels:host=dp-dca-us-030 Value:0xc05207ecd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431579147s EvaluationString:[ var='A' labels={host=dp-dca-us-030} value=-0.003935780421398268 ], [ var='B' labels={host=dp-dca-us-030} value=-0.003935780421398268 ], [ var='C' labels={host=dp-dca-us-030} value=0 ]} {Instance:host=dp-dca-us-031 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-031 Value:0xc05207ed60} B:{Var:B Labels:host=dp-dca-us-031 Value:0xc05207ed90} C:{Var:C Labels:host=dp-dca-us-031 Value:0xc05207edb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431586568s EvaluationString:[ var='A' labels={host=dp-dca-us-031} value=-0.09807145485940591 ], [ var='B' labels={host=dp-dca-us-031} value=-0.09807145485940591 ], [ var='C' labels={host=dp-dca-us-031} value=0 ]} {Instance:host=dp-dca-us-032 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-032 Value:0xc05207edf0} B:{Var:B Labels:host=dp-dca-us-032 Value:0xc05207ee20} C:{Var:C Labels:host=dp-dca-us-032 Value:0xc05207ee40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431594761s EvaluationString:[ var='A' labels={host=dp-dca-us-032} value=-0.20032333939534416 ], [ var='B' labels={host=dp-dca-us-032} value=-0.20032333939534416 ], [ var='C' labels={host=dp-dca-us-032} value=0 ]} {Instance:host=dp-dca-us-033 State:Normal Error: 
Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-033 Value:0xc05207eeb0} B:{Var:B Labels:host=dp-dca-us-033 Value:0xc05207eee0} C:{Var:C Labels:host=dp-dca-us-033 Value:0xc05207ee90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43160298s EvaluationString:[ var='A' labels={host=dp-dca-us-033} value=-0.3604693574543205 ], [ var='B' labels={host=dp-dca-us-033} value=-0.3604693574543205 ], [ var='C' labels={host=dp-dca-us-033} value=0 ]} {Instance:host=dp-dca-us-034 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-034 Value:0xc05207ef20} B:{Var:B Labels:host=dp-dca-us-034 Value:0xc05207ef50} C:{Var:C Labels:host=dp-dca-us-034 Value:0xc05207ef80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431611226s EvaluationString:[ var='A' labels={host=dp-dca-us-034} value=-0.08939778982425294 ], [ var='B' labels={host=dp-dca-us-034} value=-0.08939778982425294 ], [ var='C' labels={host=dp-dca-us-034} value=0 ]} {Instance:host=dp-dca-us-035 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-035 Value:0xc05207efd0} B:{Var:B Labels:host=dp-dca-us-035 Value:0xc05207f000} C:{Var:C Labels:host=dp-dca-us-035 Value:0xc05207f020}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43161906s EvaluationString:[ var='A' labels={host=dp-dca-us-035} value=-0.3335410570474145 ], [ var='B' labels={host=dp-dca-us-035} value=-0.3335410570474145 ], [ var='C' labels={host=dp-dca-us-035} value=0 ]} {Instance:host=dp-dca-us-036 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-036 Value:0xc05207f070} B:{Var:B Labels:host=dp-dca-us-036 Value:0xc05207f090} C:{Var:C Labels:host=dp-dca-us-036 Value:0xc05207f0d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431628074s EvaluationString:[ var='A' labels={host=dp-dca-us-036} value=-0.0178885297238196 ], [ var='B' labels={host=dp-dca-us-036} value=-0.0178885297238196 ], [ var='C' labels={host=dp-dca-us-036} value=0 ]} {Instance:host=dp-dca-us-037 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-037 Value:0xc05207f120} B:{Var:B Labels:host=dp-dca-us-037 Value:0xc05207f140} C:{Var:C Labels:host=dp-dca-us-037 Value:0xc05207f170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43163592s EvaluationString:[ var='A' labels={host=dp-dca-us-037} value=-0.18621898666059641 ], [ var='B' labels={host=dp-dca-us-037} value=-0.18621898666059641 ], [ var='C' labels={host=dp-dca-us-037} value=0 ]} {Instance:host=dp-dca-us-038 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-038 Value:0xc05207f250} B:{Var:B Labels:host=dp-dca-us-038 Value:0xc05207f280} C:{Var:C Labels:host=dp-dca-us-038 Value:0xc05207f230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431648406s EvaluationString:[ var='A' labels={host=dp-dca-us-038} value=0.05059258084473939 ], [ var='B' labels={host=dp-dca-us-038} value=0.05059258084473939 ], [ var='C' labels={host=dp-dca-us-038} value=0 ]} {Instance:host=dp-dca-us-039 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-039 Value:0xc05207f2d0} B:{Var:B Labels:host=dp-dca-us-039 Value:0xc05207f300} C:{Var:C Labels:host=dp-dca-us-039 Value:0xc05207f330}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431656478s EvaluationString:[ var='A' labels={host=dp-dca-us-039} value=0.20086176552120655 ], [ var='B' labels={host=dp-dca-us-039} value=0.20086176552120655 ], [ var='C' labels={host=dp-dca-us-039} value=0 ]} 
{Instance:host=dp-dca-us-040 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-040 Value:0xc05207f3a0} B:{Var:B Labels:host=dp-dca-us-040 Value:0xc05207f3c0} C:{Var:C Labels:host=dp-dca-us-040 Value:0xc05207f370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43166453s EvaluationString:[ var='A' labels={host=dp-dca-us-040} value=-0.18771813864195983 ], [ var='B' labels={host=dp-dca-us-040} value=-0.18771813864195983 ], [ var='C' labels={host=dp-dca-us-040} value=0 ]} {Instance:host=dp-dca-us-041 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-041 Value:0xc05207f410} B:{Var:B Labels:host=dp-dca-us-041 Value:0xc05207f440} C:{Var:C Labels:host=dp-dca-us-041 Value:0xc05207f4c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431672345s EvaluationString:[ var='A' labels={host=dp-dca-us-041} value=-0.07032846085640662 ], [ var='B' labels={host=dp-dca-us-041} value=-0.07032846085640662 ], [ var='C' labels={host=dp-dca-us-041} value=0 ]} {Instance:host=dp-dca-us-042 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-042 Value:0xc05207f530} B:{Var:B Labels:host=dp-dca-us-042 Value:0xc05207f550} C:{Var:C Labels:host=dp-dca-us-042 Value:0xc05207f580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431679878s EvaluationString:[ var='A' labels={host=dp-dca-us-042} value=0.09961132904043404 ], [ var='B' labels={host=dp-dca-us-042} value=0.09961132904043404 ], [ var='C' labels={host=dp-dca-us-042} value=0 ]} {Instance:host=dp-dca-us-043 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dca-us-043 Value:0xc05207f620} B:{Var:B Labels:host=dp-dca-us-043 Value:0xc05207f5d0} C:{Var:C Labels:host=dp-dca-us-043 Value:0xc05207f5f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431689033s EvaluationString:[ var='A' labels={host=dp-dca-us-043} value=-0.15363899469767667 ], [ var='B' labels={host=dp-dca-us-043} value=-0.15363899469767667 ], [ var='C' labels={host=dp-dca-us-043} value=0 ]} {Instance:host=dp-den-us-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-den-us-006 Value:0xc05207f6a0} B:{Var:B Labels:host=dp-den-us-006 Value:0xc05207f6c0} C:{Var:C Labels:host=dp-den-us-006 Value:0xc05207f680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431697063s EvaluationString:[ var='A' labels={host=dp-den-us-006} value=-0.04682575186629142 ], [ var='B' labels={host=dp-den-us-006} value=-0.04682575186629142 ], [ var='C' labels={host=dp-den-us-006} value=0 ]} {Instance:host=dp-den-us-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-den-us-007 Value:0xc05207f740} B:{Var:B Labels:host=dp-den-us-007 Value:0xc05207f760} C:{Var:C Labels:host=dp-den-us-007 Value:0xc05207f720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431705154s EvaluationString:[ var='A' labels={host=dp-den-us-007} value=-0.15694245230130427 ], [ var='B' labels={host=dp-den-us-007} value=-0.15694245230130427 ], [ var='C' labels={host=dp-den-us-007} value=0 ]} {Instance:host=dp-den-us-008 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-den-us-008 Value:0xc05207f830} B:{Var:B Labels:host=dp-den-us-008 Value:0xc05207f7c0} C:{Var:C Labels:host=dp-den-us-008 Value:0xc05207f800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431712512s EvaluationString:[ var='A' labels={host=dp-den-us-008} value=0.0841455621088727 ], [ var='B' labels={host=dp-den-us-008} value=0.0841455621088727 
], [ var='C' labels={host=dp-den-us-008} value=0 ]} {Instance:host=dp-den-us-009 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-den-us-009 Value:0xc05207f8c0} B:{Var:B Labels:host=dp-den-us-009 Value:0xc05207f870} C:{Var:C Labels:host=dp-den-us-009 Value:0xc05207f890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431720398s EvaluationString:[ var='A' labels={host=dp-den-us-009} value=-0.22863384901623837 ], [ var='B' labels={host=dp-den-us-009} value=-0.22863384901623837 ], [ var='C' labels={host=dp-den-us-009} value=0 ]} {Instance:host=dp-den-us-010 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-den-us-010 Value:0xc05207f900} B:{Var:B Labels:host=dp-den-us-010 Value:0xc05207f930} C:{Var:C Labels:host=dp-den-us-010 Value:0xc05207f950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431729146s EvaluationString:[ var='A' labels={host=dp-den-us-010} value=0.11109181002533812 ], [ var='B' labels={host=dp-den-us-010} value=0.11109181002533812 ], [ var='C' labels={host=dp-den-us-010} value=0 ]} {Instance:host=dp-dub-ie-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dub-ie-001 Value:0xc05207f9f0} B:{Var:B Labels:host=dp-dub-ie-001 Value:0xc05207f9b0} C:{Var:C Labels:host=dp-dub-ie-001 Value:0xc05207f9d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431738529s EvaluationString:[ var='A' labels={host=dp-dub-ie-001} value=-0.06058581760508257 ], [ var='B' labels={host=dp-dub-ie-001} value=-0.06058581760508257 ], [ var='C' labels={host=dp-dub-ie-001} value=0 ]} {Instance:host=dp-dub-ie-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dub-ie-002 Value:0xc05207fa40} B:{Var:B Labels:host=dp-dub-ie-002 Value:0xc05207fa70} C:{Var:C Labels:host=dp-dub-ie-002 Value:0xc05207fac0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431746199s EvaluationString:[ var='A' labels={host=dp-dub-ie-002} value=-0.11922833973855518 ], [ var='B' labels={host=dp-dub-ie-002} value=-0.11922833973855518 ], [ var='C' labels={host=dp-dub-ie-002} value=0 ]} {Instance:host=dp-dub-ie-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dub-ie-003 Value:0xc05207fb00} B:{Var:B Labels:host=dp-dub-ie-003 Value:0xc05207fb20} C:{Var:C Labels:host=dp-dub-ie-003 Value:0xc05207fb40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431753816s EvaluationString:[ var='A' labels={host=dp-dub-ie-003} value=0.09211915086258468 ], [ var='B' labels={host=dp-dub-ie-003} value=0.09211915086258468 ], [ var='C' labels={host=dp-dub-ie-003} value=0 ]} {Instance:host=dp-dub-ie-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-dub-ie-004 Value:0xc05207fb90} B:{Var:B Labels:host=dp-dub-ie-004 Value:0xc05207fbc0} C:{Var:C Labels:host=dp-dub-ie-004 Value:0xc05207fbe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431761856s EvaluationString:[ var='A' labels={host=dp-dub-ie-004} value=-0.11224565562598739 ], [ var='B' labels={host=dp-dub-ie-004} value=-0.11224565562598739 ], [ var='C' labels={host=dp-dub-ie-004} value=0 ]} {Instance:host=dp-fra-de-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-001 Value:0xc05207fc40} B:{Var:B Labels:host=dp-fra-de-001 Value:0xc05207fc80} C:{Var:C Labels:host=dp-fra-de-001 Value:0xc05207fca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431769752s EvaluationString:[ var='A' labels={host=dp-fra-de-001} value=-0.09247568197135284 ], [ var='B' 
labels={host=dp-fra-de-001} value=-0.09247568197135284 ], [ var='C' labels={host=dp-fra-de-001} value=0 ]} {Instance:host=dp-fra-de-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-002 Value:0xc05207fd00} B:{Var:B Labels:host=dp-fra-de-002 Value:0xc05207fd20} C:{Var:C Labels:host=dp-fra-de-002 Value:0xc05207fd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431778056s EvaluationString:[ var='A' labels={host=dp-fra-de-002} value=0.016252630771163012 ], [ var='B' labels={host=dp-fra-de-002} value=0.016252630771163012 ], [ var='C' labels={host=dp-fra-de-002} value=0 ]} {Instance:host=dp-fra-de-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-003 Value:0xc05207fde0} B:{Var:B Labels:host=dp-fra-de-003 Value:0xc05207fe10} C:{Var:C Labels:host=dp-fra-de-003 Value:0xc05207fdc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431786996s EvaluationString:[ var='A' labels={host=dp-fra-de-003} value=-0.26202239107062064 ], [ var='B' labels={host=dp-fra-de-003} value=-0.26202239107062064 ], [ var='C' labels={host=dp-fra-de-003} value=0 ]} {Instance:host=dp-fra-de-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-004 Value:0xc05207fea0} B:{Var:B Labels:host=dp-fra-de-004 Value:0xc05207fe50} C:{Var:C Labels:host=dp-fra-de-004 Value:0xc05207fe80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431795574s EvaluationString:[ var='A' labels={host=dp-fra-de-004} value=-0.16671312856045814 ], [ var='B' labels={host=dp-fra-de-004} value=-0.16671312856045814 ], [ var='C' labels={host=dp-fra-de-004} value=0 ]} {Instance:host=dp-fra-de-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-005 Value:0xc05207ff00} B:{Var:B Labels:host=dp-fra-de-005 Value:0xc05207ff30} C:{Var:C Labels:host=dp-fra-de-005 Value:0xc05207ff70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431802976s EvaluationString:[ var='A' labels={host=dp-fra-de-005} value=-0.1513738324986965 ], [ var='B' labels={host=dp-fra-de-005} value=-0.1513738324986965 ], [ var='C' labels={host=dp-fra-de-005} value=0 ]} {Instance:host=dp-fra-de-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-006 Value:0xc05207ffc0} B:{Var:B Labels:host=dp-fra-de-006 Value:0xc05207fff0} C:{Var:C Labels:host=dp-fra-de-006 Value:0xc01e226010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431810656s EvaluationString:[ var='A' labels={host=dp-fra-de-006} value=-0.4337082135062609 ], [ var='B' labels={host=dp-fra-de-006} value=-0.4337082135062609 ], [ var='C' labels={host=dp-fra-de-006} value=0 ]} {Instance:host=dp-fra-de-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-007 Value:0xc01e226070} B:{Var:B Labels:host=dp-fra-de-007 Value:0xc01e226090} C:{Var:C Labels:host=dp-fra-de-007 Value:0xc01e2260b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431817901s EvaluationString:[ var='A' labels={host=dp-fra-de-007} value=0.10884755031323974 ], [ var='B' labels={host=dp-fra-de-007} value=0.10884755031323974 ], [ var='C' labels={host=dp-fra-de-007} value=0 ]} {Instance:host=dp-fra-de-008 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-008 Value:0xc01e2260f0} B:{Var:B Labels:host=dp-fra-de-008 Value:0xc01e226110} C:{Var:C Labels:host=dp-fra-de-008 Value:0xc01e226230}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431825983s EvaluationString:[ var='A' 
labels={host=dp-fra-de-008} value=0.035634791950472554 ], [ var='B' labels={host=dp-fra-de-008} value=0.035634791950472554 ], [ var='C' labels={host=dp-fra-de-008} value=0 ]} {Instance:host=dp-fra-de-009 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-fra-de-009 Value:0xc01e226270} B:{Var:B Labels:host=dp-fra-de-009 Value:0xc01e226290} C:{Var:C Labels:host=dp-fra-de-009 Value:0xc01e2262c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431868686s EvaluationString:[ var='A' labels={host=dp-fra-de-009} value=0.17268107699248958 ], [ var='B' labels={host=dp-fra-de-009} value=0.17268107699248958 ], [ var='C' labels={host=dp-fra-de-009} value=0 ]} {Instance:host=dp-hkg-hk-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hkg-hk-001 Value:0xc01e226300} B:{Var:B Labels:host=dp-hkg-hk-001 Value:0xc01e226320} C:{Var:C Labels:host=dp-hkg-hk-001 Value:0xc01e226340}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431876729s EvaluationString:[ var='A' labels={host=dp-hkg-hk-001} value=0.1464576944696382 ], [ var='B' labels={host=dp-hkg-hk-001} value=0.1464576944696382 ], [ var='C' labels={host=dp-hkg-hk-001} value=0 ]} {Instance:host=dp-hkg-hk-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hkg-hk-002 Value:0xc01e226390} B:{Var:B Labels:host=dp-hkg-hk-002 Value:0xc01e2263b0} C:{Var:C Labels:host=dp-hkg-hk-002 Value:0xc01e2263d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431884867s EvaluationString:[ var='A' labels={host=dp-hkg-hk-002} value=-0.015858307494198698 ], [ var='B' labels={host=dp-hkg-hk-002} value=-0.015858307494198698 ], [ var='C' labels={host=dp-hkg-hk-002} value=0 ]} {Instance:host=dp-hkg-hk-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hkg-hk-003 Value:0xc01e226410} B:{Var:B Labels:host=dp-hkg-hk-003 Value:0xc01e226440} C:{Var:C Labels:host=dp-hkg-hk-003 Value:0xc01e226460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43189319s EvaluationString:[ var='A' labels={host=dp-hkg-hk-003} value=-0.07877233788499893 ], [ var='B' labels={host=dp-hkg-hk-003} value=-0.07877233788499893 ], [ var='C' labels={host=dp-hkg-hk-003} value=0 ]} {Instance:host=dp-hkg-tw-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hkg-tw-001 Value:0xc01e2264a0} B:{Var:B Labels:host=dp-hkg-tw-001 Value:0xc01e2264c0} C:{Var:C Labels:host=dp-hkg-tw-001 Value:0xc01e2264e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431901714s EvaluationString:[ var='A' labels={host=dp-hkg-tw-001} value=-0.037195176186770595 ], [ var='B' labels={host=dp-hkg-tw-001} value=-0.037195176186770595 ], [ var='C' labels={host=dp-hkg-tw-001} value=0 ]} {Instance:host=dp-hkg-tw-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hkg-tw-002 Value:0xc01e226540} B:{Var:B Labels:host=dp-hkg-tw-002 Value:0xc01e226560} C:{Var:C Labels:host=dp-hkg-tw-002 Value:0xc01e226520}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431909613s EvaluationString:[ var='A' labels={host=dp-hkg-tw-002} value=0.10082050309939916 ], [ var='B' labels={host=dp-hkg-tw-002} value=0.10082050309939916 ], [ var='C' labels={host=dp-hkg-tw-002} value=0 ]} {Instance:host=dp-hnd-jp-058 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hnd-jp-058 Value:0xc01e2265f0} B:{Var:B Labels:host=dp-hnd-jp-058 Value:0xc01e2265a0} C:{Var:C Labels:host=dp-hnd-jp-058 Value:0xc01e2265d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.431917536s EvaluationString:[ var='A' labels={host=dp-hnd-jp-058} value=0.02772848724758603 ], [ var='B' labels={host=dp-hnd-jp-058} value=0.02772848724758603 ], [ var='C' labels={host=dp-hnd-jp-058} value=0 ]} {Instance:host=dp-hnd-jp-059 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hnd-jp-059 Value:0xc01e226640} B:{Var:B Labels:host=dp-hnd-jp-059 Value:0xc01e226660} C:{Var:C Labels:host=dp-hnd-jp-059 Value:0xc01e226690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431925268s EvaluationString:[ var='A' labels={host=dp-hnd-jp-059} value=-0.07257921470954898 ], [ var='B' labels={host=dp-hnd-jp-059} value=-0.07257921470954898 ], [ var='C' labels={host=dp-hnd-jp-059} value=0 ]} {Instance:host=dp-hnd-jp-060 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hnd-jp-060 Value:0xc01e2267e0} B:{Var:B Labels:host=dp-hnd-jp-060 Value:0xc01e226800} C:{Var:C Labels:host=dp-hnd-jp-060 Value:0xc01e2267c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431933016s EvaluationString:[ var='A' labels={host=dp-hnd-jp-060} value=-0.07895726709676865 ], [ var='B' labels={host=dp-hnd-jp-060} value=-0.07895726709676865 ], [ var='C' labels={host=dp-hnd-jp-060} value=0 ]} {Instance:host=dp-hnd-jp-061 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hnd-jp-061 Value:0xc01e226890} B:{Var:B Labels:host=dp-hnd-jp-061 Value:0xc01e226850} C:{Var:C Labels:host=dp-hnd-jp-061 Value:0xc01e226870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431941739s EvaluationString:[ var='A' labels={host=dp-hnd-jp-061} value=0.11860086524541609 ], [ var='B' labels={host=dp-hnd-jp-061} value=0.11860086524541609 ], [ var='C' labels={host=dp-hnd-jp-061} value=0 ]} {Instance:host=dp-hnd-jp-062 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hnd-jp-062 Value:0xc01e226b00} B:{Var:B Labels:host=dp-hnd-jp-062 Value:0xc01e226ab0} C:{Var:C Labels:host=dp-hnd-jp-062 Value:0xc01e226ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431950029s EvaluationString:[ var='A' labels={host=dp-hnd-jp-062} value=-0.011113123576507178 ], [ var='B' labels={host=dp-hnd-jp-062} value=-0.011113123576507178 ], [ var='C' labels={host=dp-hnd-jp-062} value=0 ]} {Instance:host=dp-hnd-jp-063 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hnd-jp-063 Value:0xc01e226b50} B:{Var:B Labels:host=dp-hnd-jp-063 Value:0xc01e226b70} C:{Var:C Labels:host=dp-hnd-jp-063 Value:0xc01e226b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431958807s EvaluationString:[ var='A' labels={host=dp-hnd-jp-063} value=0.04294867838559924 ], [ var='B' labels={host=dp-hnd-jp-063} value=0.04294867838559924 ], [ var='C' labels={host=dp-hnd-jp-063} value=0 ]} {Instance:host=dp-hnd-jp-064 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-hnd-jp-064 Value:0xc01e226be0} B:{Var:B Labels:host=dp-hnd-jp-064 Value:0xc01e226c00} C:{Var:C Labels:host=dp-hnd-jp-064 Value:0xc01e226c20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431967303s EvaluationString:[ var='A' labels={host=dp-hnd-jp-064} value=-0.339664436178627 ], [ var='B' labels={host=dp-hnd-jp-064} value=-0.339664436178627 ], [ var='C' labels={host=dp-hnd-jp-064} value=0 ]} {Instance:host=dp-iah-us-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iah-us-001 Value:0xc01e226c70} B:{Var:B Labels:host=dp-iah-us-001 Value:0xc01e226c90} C:{Var:C Labels:host=dp-iah-us-001 
Value:0xc01e226cb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431975775s EvaluationString:[ var='A' labels={host=dp-iah-us-001} value=-0.12425314890284907 ], [ var='B' labels={host=dp-iah-us-001} value=-0.12425314890284907 ], [ var='C' labels={host=dp-iah-us-001} value=0 ]} {Instance:host=dp-iah-us-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iah-us-002 Value:0xc01e226d10} B:{Var:B Labels:host=dp-iah-us-002 Value:0xc01e226d40} C:{Var:C Labels:host=dp-iah-us-002 Value:0xc01e226cf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431983114s EvaluationString:[ var='A' labels={host=dp-iah-us-002} value=0.1447530835688345 ], [ var='B' labels={host=dp-iah-us-002} value=0.1447530835688345 ], [ var='C' labels={host=dp-iah-us-002} value=0 ]} {Instance:host=dp-iah-us-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iah-us-003 Value:0xc01e226db0} B:{Var:B Labels:host=dp-iah-us-003 Value:0xc01e226dd0} C:{Var:C Labels:host=dp-iah-us-003 Value:0xc01e226d90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431990495s EvaluationString:[ var='A' labels={host=dp-iah-us-003} value=0.08269217399927697 ], [ var='B' labels={host=dp-iah-us-003} value=0.08269217399927697 ], [ var='C' labels={host=dp-iah-us-003} value=0 ]} {Instance:host=dp-iah-us-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iah-us-004 Value:0xc01e226e70} B:{Var:B Labels:host=dp-iah-us-004 Value:0xc01e226e90} C:{Var:C Labels:host=dp-iah-us-004 Value:0xc01e226eb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431999221s EvaluationString:[ var='A' labels={host=dp-iah-us-004} value=0.01863093319511696 ], [ var='B' labels={host=dp-iah-us-004} value=0.01863093319511696 ], [ var='C' labels={host=dp-iah-us-004} value=0 ]} {Instance:host=dp-iah-us-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iah-us-005 Value:0xc01e226f20} B:{Var:B Labels:host=dp-iah-us-005 Value:0xc01e226fa0} C:{Var:C Labels:host=dp-iah-us-005 Value:0xc01e226ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432008874s EvaluationString:[ var='A' labels={host=dp-iah-us-005} value=0.011863085564150262 ], [ var='B' labels={host=dp-iah-us-005} value=0.011863085564150262 ], [ var='C' labels={host=dp-iah-us-005} value=0 ]} {Instance:host=dp-iah-us-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iah-us-006 Value:0xc01e227000} B:{Var:B Labels:host=dp-iah-us-006 Value:0xc01e227020} C:{Var:C Labels:host=dp-iah-us-006 Value:0xc01e226fe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432016739s EvaluationString:[ var='A' labels={host=dp-iah-us-006} value=-0.03494857193970802 ], [ var='B' labels={host=dp-iah-us-006} value=-0.03494857193970802 ], [ var='C' labels={host=dp-iah-us-006} value=0 ]} {Instance:host=dp-iis-pt-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iis-pt-001 Value:0xc01e227070} B:{Var:B Labels:host=dp-iis-pt-001 Value:0xc01e2270f0} C:{Var:C Labels:host=dp-iis-pt-001 Value:0xc01e227110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432024569s EvaluationString:[ var='A' labels={host=dp-iis-pt-001} value=0.014913828455513329 ], [ var='B' labels={host=dp-iis-pt-001} value=0.014913828455513329 ], [ var='C' labels={host=dp-iis-pt-001} value=0 ]} {Instance:host=dp-iis-pt-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-iis-pt-002 Value:0xc01e227160} B:{Var:B Labels:host=dp-iis-pt-002 
Value:0xc01e227180} C:{Var:C Labels:host=dp-iis-pt-002 Value:0xc01e2271a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43203249s EvaluationString:[ var='A' labels={host=dp-iis-pt-002} value=0.20360162048095987 ], [ var='B' labels={host=dp-iis-pt-002} value=0.20360162048095987 ], [ var='C' labels={host=dp-iis-pt-002} value=0 ]} {Instance:host=dp-jfk-us-055 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-055 Value:0xc01e2271e0} B:{Var:B Labels:host=dp-jfk-us-055 Value:0xc01e227350} C:{Var:C Labels:host=dp-jfk-us-055 Value:0xc01e227370}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432040142s EvaluationString:[ var='A' labels={host=dp-jfk-us-055} value=-0.1461199214488874 ], [ var='B' labels={host=dp-jfk-us-055} value=-0.1461199214488874 ], [ var='C' labels={host=dp-jfk-us-055} value=0 ]} {Instance:host=dp-jfk-us-056 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-056 Value:0xc01e2273c0} B:{Var:B Labels:host=dp-jfk-us-056 Value:0xc01e2273e0} C:{Var:C Labels:host=dp-jfk-us-056 Value:0xc01e227400}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432047766s EvaluationString:[ var='A' labels={host=dp-jfk-us-056} value=0.08781181436904244 ], [ var='B' labels={host=dp-jfk-us-056} value=0.08781181436904244 ], [ var='C' labels={host=dp-jfk-us-056} value=0 ]} {Instance:host=dp-jfk-us-057 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-057 Value:0xc01e227450} B:{Var:B Labels:host=dp-jfk-us-057 Value:0xc01e227480} C:{Var:C Labels:host=dp-jfk-us-057 Value:0xc01e2274a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432055712s EvaluationString:[ var='A' labels={host=dp-jfk-us-057} value=0.033618820829393395 ], [ var='B' labels={host=dp-jfk-us-057} value=0.033618820829393395 ], [ var='C' labels={host=dp-jfk-us-057} value=0 ]} {Instance:host=dp-jfk-us-058 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-058 Value:0xc01e227510} B:{Var:B Labels:host=dp-jfk-us-058 Value:0xc01e227540} C:{Var:C Labels:host=dp-jfk-us-058 Value:0xc01e2274f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432063947s EvaluationString:[ var='A' labels={host=dp-jfk-us-058} value=-0.05931686878865487 ], [ var='B' labels={host=dp-jfk-us-058} value=-0.05931686878865487 ], [ var='C' labels={host=dp-jfk-us-058} value=0 ]} {Instance:host=dp-jfk-us-059 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-059 Value:0xc01e2275a0} B:{Var:B Labels:host=dp-jfk-us-059 Value:0xc01e2275c0} C:{Var:C Labels:host=dp-jfk-us-059 Value:0xc01e227580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432072472s EvaluationString:[ var='A' labels={host=dp-jfk-us-059} value=0.018482113434828307 ], [ var='B' labels={host=dp-jfk-us-059} value=0.018482113434828307 ], [ var='C' labels={host=dp-jfk-us-059} value=0 ]} {Instance:host=dp-jfk-us-060 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-060 Value:0xc01e227650} B:{Var:B Labels:host=dp-jfk-us-060 Value:0xc01e227610} C:{Var:C Labels:host=dp-jfk-us-060 Value:0xc01e227630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432079915s EvaluationString:[ var='A' labels={host=dp-jfk-us-060} value=-0.2136629285243272 ], [ var='B' labels={host=dp-jfk-us-060} value=-0.2136629285243272 ], [ var='C' labels={host=dp-jfk-us-060} value=0 ]} {Instance:host=dp-jfk-us-061 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-061 
Value:0xc01e227690} B:{Var:B Labels:host=dp-jfk-us-061 Value:0xc01e2276b0} C:{Var:C Labels:host=dp-jfk-us-061 Value:0xc01e2276e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432088269s EvaluationString:[ var='A' labels={host=dp-jfk-us-061} value=-0.2829993850640449 ], [ var='B' labels={host=dp-jfk-us-061} value=-0.2829993850640449 ], [ var='C' labels={host=dp-jfk-us-061} value=0 ]} {Instance:host=dp-jfk-us-062 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-062 Value:0xc01e227720} B:{Var:B Labels:host=dp-jfk-us-062 Value:0xc01e227750} C:{Var:C Labels:host=dp-jfk-us-062 Value:0xc01e227780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43209759s EvaluationString:[ var='A' labels={host=dp-jfk-us-062} value=-0.07654792267656615 ], [ var='B' labels={host=dp-jfk-us-062} value=-0.07654792267656615 ], [ var='C' labels={host=dp-jfk-us-062} value=0 ]} {Instance:host=dp-jfk-us-063 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-063 Value:0xc01e227800} B:{Var:B Labels:host=dp-jfk-us-063 Value:0xc01e2277c0} C:{Var:C Labels:host=dp-jfk-us-063 Value:0xc01e2277e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4321061s EvaluationString:[ var='A' labels={host=dp-jfk-us-063} value=0.10572060290343188 ], [ var='B' labels={host=dp-jfk-us-063} value=0.10572060290343188 ], [ var='C' labels={host=dp-jfk-us-063} value=0 ]} {Instance:host=dp-jfk-us-064 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-064 Value:0xc01e227860} B:{Var:B Labels:host=dp-jfk-us-064 Value:0xc01e227890} C:{Var:C Labels:host=dp-jfk-us-064 Value:0xc01e227840}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432115366s EvaluationString:[ var='A' labels={host=dp-jfk-us-064} value=0.1927980353896904 ], [ var='B' labels={host=dp-jfk-us-064} value=0.1927980353896904 ], [ var='C' labels={host=dp-jfk-us-064} value=0 ]} {Instance:host=dp-jfk-us-065 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-065 Value:0xc01e227910} B:{Var:B Labels:host=dp-jfk-us-065 Value:0xc01e2278d0} C:{Var:C Labels:host=dp-jfk-us-065 Value:0xc01e2278f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432123388s EvaluationString:[ var='A' labels={host=dp-jfk-us-065} value=0.035571179285309285 ], [ var='B' labels={host=dp-jfk-us-065} value=0.035571179285309285 ], [ var='C' labels={host=dp-jfk-us-065} value=0 ]} {Instance:host=dp-jfk-us-066 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-066 Value:0xc01e227960} B:{Var:B Labels:host=dp-jfk-us-066 Value:0xc01e227980} C:{Var:C Labels:host=dp-jfk-us-066 Value:0xc01e2279a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432131137s EvaluationString:[ var='A' labels={host=dp-jfk-us-066} value=0.03162545034086861 ], [ var='B' labels={host=dp-jfk-us-066} value=0.03162545034086861 ], [ var='C' labels={host=dp-jfk-us-066} value=0 ]} {Instance:host=dp-jfk-us-067 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-067 Value:0xc01e2279f0} B:{Var:B Labels:host=dp-jfk-us-067 Value:0xc01e227a50} C:{Var:C Labels:host=dp-jfk-us-067 Value:0xc01e227a70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43213902s EvaluationString:[ var='A' labels={host=dp-jfk-us-067} value=-0.22940064136761862 ], [ var='B' labels={host=dp-jfk-us-067} value=-0.22940064136761862 ], [ var='C' labels={host=dp-jfk-us-067} value=0 ]} {Instance:host=dp-jfk-us-068 State:Normal Error: Results:map[] 
Values:map[A:{Var:A Labels:host=dp-jfk-us-068 Value:0xc01e227af0} B:{Var:B Labels:host=dp-jfk-us-068 Value:0xc01e227ab0} C:{Var:C Labels:host=dp-jfk-us-068 Value:0xc01e227ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432146595s EvaluationString:[ var='A' labels={host=dp-jfk-us-068} value=-0.22327479911305304 ], [ var='B' labels={host=dp-jfk-us-068} value=-0.22327479911305304 ], [ var='C' labels={host=dp-jfk-us-068} value=0 ]} {Instance:host=dp-jfk-us-069 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-069 Value:0xc01e227b30} B:{Var:B Labels:host=dp-jfk-us-069 Value:0xc01e227b50} C:{Var:C Labels:host=dp-jfk-us-069 Value:0xc01e227b70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432155274s EvaluationString:[ var='A' labels={host=dp-jfk-us-069} value=0.10448608163496687 ], [ var='B' labels={host=dp-jfk-us-069} value=0.10448608163496687 ], [ var='C' labels={host=dp-jfk-us-069} value=0 ]} {Instance:host=dp-jfk-us-070 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-070 Value:0xc01e227c00} B:{Var:B Labels:host=dp-jfk-us-070 Value:0xc01e227bb0} C:{Var:C Labels:host=dp-jfk-us-070 Value:0xc01e227be0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4321641s EvaluationString:[ var='A' labels={host=dp-jfk-us-070} value=-0.04362836147987024 ], [ var='B' labels={host=dp-jfk-us-070} value=-0.04362836147987024 ], [ var='C' labels={host=dp-jfk-us-070} value=0 ]} {Instance:host=dp-jfk-us-071 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-071 Value:0xc01e227c50} B:{Var:B Labels:host=dp-jfk-us-071 Value:0xc01e227c70} C:{Var:C Labels:host=dp-jfk-us-071 Value:0xc01e227c90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432171941s EvaluationString:[ var='A' labels={host=dp-jfk-us-071} value=-0.32752923510786996 ], [ var='B' labels={host=dp-jfk-us-071} value=-0.32752923510786996 ], [ var='C' labels={host=dp-jfk-us-071} value=0 ]} {Instance:host=dp-jfk-us-072 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-072 Value:0xc01e227d10} B:{Var:B Labels:host=dp-jfk-us-072 Value:0xc01e227d30} C:{Var:C Labels:host=dp-jfk-us-072 Value:0xc01e227d50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432179439s EvaluationString:[ var='A' labels={host=dp-jfk-us-072} value=-0.32226702897117676 ], [ var='B' labels={host=dp-jfk-us-072} value=-0.32226702897117676 ], [ var='C' labels={host=dp-jfk-us-072} value=0 ]} {Instance:host=dp-jfk-us-073 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-073 Value:0xc01e227da0} B:{Var:B Labels:host=dp-jfk-us-073 Value:0xc01e227dc0} C:{Var:C Labels:host=dp-jfk-us-073 Value:0xc01e227de0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43218727s EvaluationString:[ var='A' labels={host=dp-jfk-us-073} value=-0.031529944519681816 ], [ var='B' labels={host=dp-jfk-us-073} value=-0.031529944519681816 ], [ var='C' labels={host=dp-jfk-us-073} value=0 ]} {Instance:host=dp-jfk-us-074 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-074 Value:0xc01e227e70} B:{Var:B Labels:host=dp-jfk-us-074 Value:0xc01e227e20} C:{Var:C Labels:host=dp-jfk-us-074 Value:0xc01e227e40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432195425s EvaluationString:[ var='A' labels={host=dp-jfk-us-074} value=-0.22367030568901214 ], [ var='B' labels={host=dp-jfk-us-074} value=-0.22367030568901214 ], [ var='C' labels={host=dp-jfk-us-074} value=0 ]} 
{Instance:host=dp-jfk-us-075 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-075 Value:0xc01e227f00} B:{Var:B Labels:host=dp-jfk-us-075 Value:0xc01e227eb0} C:{Var:C Labels:host=dp-jfk-us-075 Value:0xc01e227ee0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432203029s EvaluationString:[ var='A' labels={host=dp-jfk-us-075} value=-0.04209983550251195 ], [ var='B' labels={host=dp-jfk-us-075} value=-0.04209983550251195 ], [ var='C' labels={host=dp-jfk-us-075} value=0 ]} {Instance:host=dp-jfk-us-076 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-076 Value:0xc01e227f90} B:{Var:B Labels:host=dp-jfk-us-076 Value:0xc01e227f40} C:{Var:C Labels:host=dp-jfk-us-076 Value:0xc01e227f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432210691s EvaluationString:[ var='A' labels={host=dp-jfk-us-076} value=0.6233578126763665 ], [ var='B' labels={host=dp-jfk-us-076} value=0.6233578126763665 ], [ var='C' labels={host=dp-jfk-us-076} value=0 ]} {Instance:host=dp-jfk-us-077 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-077 Value:0xc09129a020} B:{Var:B Labels:host=dp-jfk-us-077 Value:0xc01e227fd0} C:{Var:C Labels:host=dp-jfk-us-077 Value:0xc09129a000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432218937s EvaluationString:[ var='A' labels={host=dp-jfk-us-077} value=-0.208952410929637 ], [ var='B' labels={host=dp-jfk-us-077} value=-0.208952410929637 ], [ var='C' labels={host=dp-jfk-us-077} value=0 ]} {Instance:host=dp-jfk-us-078 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-078 Value:0xc09129a060} B:{Var:B Labels:host=dp-jfk-us-078 Value:0xc09129a0a0} C:{Var:C Labels:host=dp-jfk-us-078 Value:0xc09129a0c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432226599s EvaluationString:[ var='A' labels={host=dp-jfk-us-078} value=-0.29136221346304425 ], [ var='B' labels={host=dp-jfk-us-078} value=-0.29136221346304425 ], [ var='C' labels={host=dp-jfk-us-078} value=0 ]} {Instance:host=dp-jfk-us-079 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-079 Value:0xc09129a150} B:{Var:B Labels:host=dp-jfk-us-079 Value:0xc09129a100} C:{Var:C Labels:host=dp-jfk-us-079 Value:0xc09129a130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432234045s EvaluationString:[ var='A' labels={host=dp-jfk-us-079} value=-0.19044598748376196 ], [ var='B' labels={host=dp-jfk-us-079} value=-0.19044598748376196 ], [ var='C' labels={host=dp-jfk-us-079} value=0 ]} {Instance:host=dp-jfk-us-080 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-080 Value:0xc09129a190} B:{Var:B Labels:host=dp-jfk-us-080 Value:0xc09129a1b0} C:{Var:C Labels:host=dp-jfk-us-080 Value:0xc09129a1d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432241982s EvaluationString:[ var='A' labels={host=dp-jfk-us-080} value=-0.18333970320984605 ], [ var='B' labels={host=dp-jfk-us-080} value=-0.18333970320984605 ], [ var='C' labels={host=dp-jfk-us-080} value=0 ]} {Instance:host=dp-jfk-us-081 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-081 Value:0xc09129a220} B:{Var:B Labels:host=dp-jfk-us-081 Value:0xc09129a240} C:{Var:C Labels:host=dp-jfk-us-081 Value:0xc09129a270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43224989s EvaluationString:[ var='A' labels={host=dp-jfk-us-081} value=0.010807568963095093 ], [ var='B' labels={host=dp-jfk-us-081} value=0.010807568963095093 ], 
[ var='C' labels={host=dp-jfk-us-081} value=0 ]} {Instance:host=dp-jfk-us-084 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-084 Value:0xc09129a2c0} B:{Var:B Labels:host=dp-jfk-us-084 Value:0xc09129a2e0} C:{Var:C Labels:host=dp-jfk-us-084 Value:0xc09129a310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432257889s EvaluationString:[ var='A' labels={host=dp-jfk-us-084} value=-0.04795260459372841 ], [ var='B' labels={host=dp-jfk-us-084} value=-0.04795260459372841 ], [ var='C' labels={host=dp-jfk-us-084} value=0 ]} {Instance:host=dp-jfk-us-085 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-085 Value:0xc09129a350} B:{Var:B Labels:host=dp-jfk-us-085 Value:0xc09129a370} C:{Var:C Labels:host=dp-jfk-us-085 Value:0xc09129a3a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432266s EvaluationString:[ var='A' labels={host=dp-jfk-us-085} value=-0.13150717462953107 ], [ var='B' labels={host=dp-jfk-us-085} value=-0.13150717462953107 ], [ var='C' labels={host=dp-jfk-us-085} value=0 ]} {Instance:host=dp-jfk-us-086 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-086 Value:0xc09129a3e0} B:{Var:B Labels:host=dp-jfk-us-086 Value:0xc09129a400} C:{Var:C Labels:host=dp-jfk-us-086 Value:0xc09129a420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432273837s EvaluationString:[ var='A' labels={host=dp-jfk-us-086} value=-0.3155309744602401 ], [ var='B' labels={host=dp-jfk-us-086} value=-0.3155309744602401 ], [ var='C' labels={host=dp-jfk-us-086} value=0 ]} {Instance:host=dp-jfk-us-087 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-087 Value:0xc09129a4b0} B:{Var:B Labels:host=dp-jfk-us-087 Value:0xc09129a460} C:{Var:C Labels:host=dp-jfk-us-087 Value:0xc09129a480}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432282194s EvaluationString:[ var='A' labels={host=dp-jfk-us-087} value=-0.14068441581289093 ], [ var='B' labels={host=dp-jfk-us-087} value=-0.14068441581289093 ], [ var='C' labels={host=dp-jfk-us-087} value=0 ]} {Instance:host=dp-jfk-us-088 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-088 Value:0xc09129a500} B:{Var:B Labels:host=dp-jfk-us-088 Value:0xc09129a520} C:{Var:C Labels:host=dp-jfk-us-088 Value:0xc09129a540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432290613s EvaluationString:[ var='A' labels={host=dp-jfk-us-088} value=-0.46305056825000673 ], [ var='B' labels={host=dp-jfk-us-088} value=-0.46305056825000673 ], [ var='C' labels={host=dp-jfk-us-088} value=0 ]} {Instance:host=dp-jfk-us-089 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-089 Value:0xc09129a590} B:{Var:B Labels:host=dp-jfk-us-089 Value:0xc09129a5b0} C:{Var:C Labels:host=dp-jfk-us-089 Value:0xc09129a5d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43229925s EvaluationString:[ var='A' labels={host=dp-jfk-us-089} value=-0.23910461162548322 ], [ var='B' labels={host=dp-jfk-us-089} value=-0.23910461162548322 ], [ var='C' labels={host=dp-jfk-us-089} value=0 ]} {Instance:host=dp-jfk-us-090 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-090 Value:0xc09129a610} B:{Var:B Labels:host=dp-jfk-us-090 Value:0xc09129a630} C:{Var:C Labels:host=dp-jfk-us-090 Value:0xc09129a660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432306663s EvaluationString:[ var='A' labels={host=dp-jfk-us-090} value=-0.15028323319690845 ], [ var='B' 
labels={host=dp-jfk-us-090} value=-0.15028323319690845 ], [ var='C' labels={host=dp-jfk-us-090} value=0 ]} {Instance:host=dp-jfk-us-091 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-091 Value:0xc09129a6e0} B:{Var:B Labels:host=dp-jfk-us-091 Value:0xc09129a6a0} C:{Var:C Labels:host=dp-jfk-us-091 Value:0xc09129a6c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43231535s EvaluationString:[ var='A' labels={host=dp-jfk-us-091} value=-0.13386877311757775 ], [ var='B' labels={host=dp-jfk-us-091} value=-0.13386877311757775 ], [ var='C' labels={host=dp-jfk-us-091} value=0 ]} {Instance:host=dp-jfk-us-092 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-092 Value:0xc09129a730} B:{Var:B Labels:host=dp-jfk-us-092 Value:0xc09129a750} C:{Var:C Labels:host=dp-jfk-us-092 Value:0xc09129a770}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432324622s EvaluationString:[ var='A' labels={host=dp-jfk-us-092} value=-0.42049751085220544 ], [ var='B' labels={host=dp-jfk-us-092} value=-0.42049751085220544 ], [ var='C' labels={host=dp-jfk-us-092} value=0 ]} {Instance:host=dp-jfk-us-093 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-093 Value:0xc09129a810} B:{Var:B Labels:host=dp-jfk-us-093 Value:0xc09129a7c0} C:{Var:C Labels:host=dp-jfk-us-093 Value:0xc09129a7e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432332435s EvaluationString:[ var='A' labels={host=dp-jfk-us-093} value=-0.2302220873056595 ], [ var='B' labels={host=dp-jfk-us-093} value=-0.2302220873056595 ], [ var='C' labels={host=dp-jfk-us-093} value=0 ]} {Instance:host=dp-jfk-us-094 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-094 Value:0xc09129a850} B:{Var:B Labels:host=dp-jfk-us-094 Value:0xc09129a870} C:{Var:C Labels:host=dp-jfk-us-094 Value:0xc09129a890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43234031s EvaluationString:[ var='A' labels={host=dp-jfk-us-094} value=-0.16853757977480655 ], [ var='B' labels={host=dp-jfk-us-094} value=-0.16853757977480655 ], [ var='C' labels={host=dp-jfk-us-094} value=0 ]} {Instance:host=dp-jfk-us-095 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-095 Value:0xc09129a8e0} B:{Var:B Labels:host=dp-jfk-us-095 Value:0xc09129a900} C:{Var:C Labels:host=dp-jfk-us-095 Value:0xc09129a920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432349521s EvaluationString:[ var='A' labels={host=dp-jfk-us-095} value=0.24574675481252584 ], [ var='B' labels={host=dp-jfk-us-095} value=0.24574675481252584 ], [ var='C' labels={host=dp-jfk-us-095} value=0 ]} {Instance:host=dp-jfk-us-096 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jfk-us-096 Value:0xc09129a9b0} B:{Var:B Labels:host=dp-jfk-us-096 Value:0xc09129a960} C:{Var:C Labels:host=dp-jfk-us-096 Value:0xc09129a990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432359316s EvaluationString:[ var='A' labels={host=dp-jfk-us-096} value=-0.18480187183961683 ], [ var='B' labels={host=dp-jfk-us-096} value=-0.18480187183961683 ], [ var='C' labels={host=dp-jfk-us-096} value=0 ]} {Instance:host=dp-jp-yo-01 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-01 Value:0xc09129aa40} B:{Var:B Labels:host=dp-jp-yo-01 Value:0xc09129a9f0} C:{Var:C Labels:host=dp-jp-yo-01 Value:0xc09129aa20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432366246s EvaluationString:[ var='A' 
labels={host=dp-jp-yo-01} value=0.2229024312376891 ], [ var='B' labels={host=dp-jp-yo-01} value=0.2229024312376891 ], [ var='C' labels={host=dp-jp-yo-01} value=0 ]} {Instance:host=dp-jp-yo-02 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-02 Value:0xc09129aab0} B:{Var:B Labels:host=dp-jp-yo-02 Value:0xc09129aad0} C:{Var:C Labels:host=dp-jp-yo-02 Value:0xc09129aa90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432373906s EvaluationString:[ var='A' labels={host=dp-jp-yo-02} value=0.0782862464706662 ], [ var='B' labels={host=dp-jp-yo-02} value=0.0782862464706662 ], [ var='C' labels={host=dp-jp-yo-02} value=0 ]} {Instance:host=dp-jp-yo-03 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-03 Value:0xc09129ab10} B:{Var:B Labels:host=dp-jp-yo-03 Value:0xc09129ab40} C:{Var:C Labels:host=dp-jp-yo-03 Value:0xc09129ab60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432382205s EvaluationString:[ var='A' labels={host=dp-jp-yo-03} value=0.13309900698063853 ], [ var='B' labels={host=dp-jp-yo-03} value=0.13309900698063853 ], [ var='C' labels={host=dp-jp-yo-03} value=0 ]} {Instance:host=dp-jp-yo-04 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-04 Value:0xc09129aba0} B:{Var:B Labels:host=dp-jp-yo-04 Value:0xc09129abc0} C:{Var:C Labels:host=dp-jp-yo-04 Value:0xc09129abf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43239008s EvaluationString:[ var='A' labels={host=dp-jp-yo-04} value=0.04978202339672796 ], [ var='B' labels={host=dp-jp-yo-04} value=0.04978202339672796 ], [ var='C' labels={host=dp-jp-yo-04} value=0 ]} {Instance:host=dp-jp-yo-05 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-05 Value:0xc09129ac30} B:{Var:B Labels:host=dp-jp-yo-05 Value:0xc09129ac50} C:{Var:C Labels:host=dp-jp-yo-05 Value:0xc09129ac70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432397711s EvaluationString:[ var='A' labels={host=dp-jp-yo-05} value=0.12532116841325905 ], [ var='B' labels={host=dp-jp-yo-05} value=0.12532116841325905 ], [ var='C' labels={host=dp-jp-yo-05} value=0 ]} {Instance:host=dp-jp-yo-06 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-06 Value:0xc09129ad00} B:{Var:B Labels:host=dp-jp-yo-06 Value:0xc09129acc0} C:{Var:C Labels:host=dp-jp-yo-06 Value:0xc09129ace0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432406409s EvaluationString:[ var='A' labels={host=dp-jp-yo-06} value=0.47089718720167184 ], [ var='B' labels={host=dp-jp-yo-06} value=0.47089718720167184 ], [ var='C' labels={host=dp-jp-yo-06} value=0 ]} {Instance:host=dp-jp-yo-07 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-07 Value:0xc09129ad40} B:{Var:B Labels:host=dp-jp-yo-07 Value:0xc09129ad60} C:{Var:C Labels:host=dp-jp-yo-07 Value:0xc09129ad80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432414321s EvaluationString:[ var='A' labels={host=dp-jp-yo-07} value=0.529797603831627 ], [ var='B' labels={host=dp-jp-yo-07} value=0.529797603831627 ], [ var='C' labels={host=dp-jp-yo-07} value=0 ]} {Instance:host=dp-jp-yo-08 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-08 Value:0xc09129add0} B:{Var:B Labels:host=dp-jp-yo-08 Value:0xc09129adf0} C:{Var:C Labels:host=dp-jp-yo-08 Value:0xc09129ae10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432422094s EvaluationString:[ var='A' labels={host=dp-jp-yo-08} value=-0.2985610488112182 ], [ 
var='B' labels={host=dp-jp-yo-08} value=-0.2985610488112182 ], [ var='C' labels={host=dp-jp-yo-08} value=0 ]} {Instance:host=dp-jp-yo-09 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-09 Value:0xc09129ae60} B:{Var:B Labels:host=dp-jp-yo-09 Value:0xc09129ae80} C:{Var:C Labels:host=dp-jp-yo-09 Value:0xc09129aea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432429976s EvaluationString:[ var='A' labels={host=dp-jp-yo-09} value=-0.002621855872485905 ], [ var='B' labels={host=dp-jp-yo-09} value=-0.002621855872485905 ], [ var='C' labels={host=dp-jp-yo-09} value=0 ]} {Instance:host=dp-jp-yo-10 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-10 Value:0xc09129aee0} B:{Var:B Labels:host=dp-jp-yo-10 Value:0xc09129af20} C:{Var:C Labels:host=dp-jp-yo-10 Value:0xc09129af40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432437752s EvaluationString:[ var='A' labels={host=dp-jp-yo-10} value=-0.043506420884066685 ], [ var='B' labels={host=dp-jp-yo-10} value=-0.043506420884066685 ], [ var='C' labels={host=dp-jp-yo-10} value=0 ]} {Instance:host=dp-jp-yo-11 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-11 Value:0xc09129af80} B:{Var:B Labels:host=dp-jp-yo-11 Value:0xc09129afa0} C:{Var:C Labels:host=dp-jp-yo-11 Value:0xc09129afc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432446927s EvaluationString:[ var='A' labels={host=dp-jp-yo-11} value=0.036654467764840604 ], [ var='B' labels={host=dp-jp-yo-11} value=0.036654467764840604 ], [ var='C' labels={host=dp-jp-yo-11} value=0 ]} {Instance:host=dp-jp-yo-12 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-12 Value:0xc09129b000} B:{Var:B Labels:host=dp-jp-yo-12 Value:0xc09129b030} C:{Var:C Labels:host=dp-jp-yo-12 Value:0xc09129b050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432454257s EvaluationString:[ var='A' labels={host=dp-jp-yo-12} value=0.12270794415495818 ], [ var='B' labels={host=dp-jp-yo-12} value=0.12270794415495818 ], [ var='C' labels={host=dp-jp-yo-12} value=0 ]} {Instance:host=dp-jp-yo-13 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-13 Value:0xc09129b090} B:{Var:B Labels:host=dp-jp-yo-13 Value:0xc09129b0c0} C:{Var:C Labels:host=dp-jp-yo-13 Value:0xc09129b0e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432462462s EvaluationString:[ var='A' labels={host=dp-jp-yo-13} value=0.048351756394241975 ], [ var='B' labels={host=dp-jp-yo-13} value=0.048351756394241975 ], [ var='C' labels={host=dp-jp-yo-13} value=0 ]} {Instance:host=dp-jp-yo-14 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-14 Value:0xc09129b140} B:{Var:B Labels:host=dp-jp-yo-14 Value:0xc09129b180} C:{Var:C Labels:host=dp-jp-yo-14 Value:0xc09129b120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432470082s EvaluationString:[ var='A' labels={host=dp-jp-yo-14} value=0.025857831088387684 ], [ var='B' labels={host=dp-jp-yo-14} value=0.025857831088387684 ], [ var='C' labels={host=dp-jp-yo-14} value=0 ]} {Instance:host=dp-jp-yo-15 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-15 Value:0xc09129b1c0} B:{Var:B Labels:host=dp-jp-yo-15 Value:0xc09129b1e0} C:{Var:C Labels:host=dp-jp-yo-15 Value:0xc09129b200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432477686s EvaluationString:[ var='A' labels={host=dp-jp-yo-15} value=0.03594779274408211 ], [ var='B' labels={host=dp-jp-yo-15} 
value=0.03594779274408211 ], [ var='C' labels={host=dp-jp-yo-15} value=0 ]} {Instance:host=dp-jp-yo-16 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-16 Value:0xc09129b240} B:{Var:B Labels:host=dp-jp-yo-16 Value:0xc09129b260} C:{Var:C Labels:host=dp-jp-yo-16 Value:0xc09129b280}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432485417s EvaluationString:[ var='A' labels={host=dp-jp-yo-16} value=-0.01642742437650687 ], [ var='B' labels={host=dp-jp-yo-16} value=-0.01642742437650687 ], [ var='C' labels={host=dp-jp-yo-16} value=0 ]} {Instance:host=dp-jp-yo-17 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-17 Value:0xc09129b2d0} B:{Var:B Labels:host=dp-jp-yo-17 Value:0xc09129b300} C:{Var:C Labels:host=dp-jp-yo-17 Value:0xc09129b320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432492697s EvaluationString:[ var='A' labels={host=dp-jp-yo-17} value=-0.07714949365844603 ], [ var='B' labels={host=dp-jp-yo-17} value=-0.07714949365844603 ], [ var='C' labels={host=dp-jp-yo-17} value=0 ]} {Instance:host=dp-jp-yo-18 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-18 Value:0xc09129b370} B:{Var:B Labels:host=dp-jp-yo-18 Value:0xc09129b390} C:{Var:C Labels:host=dp-jp-yo-18 Value:0xc09129b3b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432500847s EvaluationString:[ var='A' labels={host=dp-jp-yo-18} value=0.02079441549503258 ], [ var='B' labels={host=dp-jp-yo-18} value=0.02079441549503258 ], [ var='C' labels={host=dp-jp-yo-18} value=0 ]} {Instance:host=dp-jp-yo-19 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-19 Value:0xc09129b410} B:{Var:B Labels:host=dp-jp-yo-19 Value:0xc09129b430} C:{Var:C Labels:host=dp-jp-yo-19 Value:0xc09129b450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432509897s EvaluationString:[ var='A' labels={host=dp-jp-yo-19} value=-0.015146811623818479 ], [ var='B' labels={host=dp-jp-yo-19} value=-0.015146811623818479 ], [ var='C' labels={host=dp-jp-yo-19} value=0 ]} {Instance:host=dp-jp-yo-20 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-20 Value:0xc09129b490} B:{Var:B Labels:host=dp-jp-yo-20 Value:0xc09129b4b0} C:{Var:C Labels:host=dp-jp-yo-20 Value:0xc09129b4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43251756s EvaluationString:[ var='A' labels={host=dp-jp-yo-20} value=0.07816123482952106 ], [ var='B' labels={host=dp-jp-yo-20} value=0.07816123482952106 ], [ var='C' labels={host=dp-jp-yo-20} value=0 ]} {Instance:host=dp-jp-yo-21 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-21 Value:0xc09129b510} B:{Var:B Labels:host=dp-jp-yo-21 Value:0xc09129b530} C:{Var:C Labels:host=dp-jp-yo-21 Value:0xc09129b550}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432525267s EvaluationString:[ var='A' labels={host=dp-jp-yo-21} value=0.015605619710859978 ], [ var='B' labels={host=dp-jp-yo-21} value=0.015605619710859978 ], [ var='C' labels={host=dp-jp-yo-21} value=0 ]} {Instance:host=dp-jp-yo-22 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-22 Value:0xc09129b5a0} B:{Var:B Labels:host=dp-jp-yo-22 Value:0xc09129b5c0} C:{Var:C Labels:host=dp-jp-yo-22 Value:0xc09129b5e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432533323s EvaluationString:[ var='A' labels={host=dp-jp-yo-22} value=0.01221928561420782 ], [ var='B' labels={host=dp-jp-yo-22} value=0.01221928561420782 ], [ var='C' 
labels={host=dp-jp-yo-22} value=0 ]} {Instance:host=dp-jp-yo-23 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-23 Value:0xc09129b640} B:{Var:B Labels:host=dp-jp-yo-23 Value:0xc09129b680} C:{Var:C Labels:host=dp-jp-yo-23 Value:0xc09129b620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432541437s EvaluationString:[ var='A' labels={host=dp-jp-yo-23} value=0.009866714729156456 ], [ var='B' labels={host=dp-jp-yo-23} value=0.009866714729156456 ], [ var='C' labels={host=dp-jp-yo-23} value=0 ]} {Instance:host=dp-jp-yo-24 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-24 Value:0xc09129b6e0} B:{Var:B Labels:host=dp-jp-yo-24 Value:0xc09129b700} C:{Var:C Labels:host=dp-jp-yo-24 Value:0xc09129b6c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43254888s EvaluationString:[ var='A' labels={host=dp-jp-yo-24} value=0.008497508112235153 ], [ var='B' labels={host=dp-jp-yo-24} value=0.008497508112235153 ], [ var='C' labels={host=dp-jp-yo-24} value=0 ]} {Instance:host=dp-jp-yo-25 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-25 Value:0xc09129b760} B:{Var:B Labels:host=dp-jp-yo-25 Value:0xc09129b780} C:{Var:C Labels:host=dp-jp-yo-25 Value:0xc09129b740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432556982s EvaluationString:[ var='A' labels={host=dp-jp-yo-25} value=-0.15076351810436606 ], [ var='B' labels={host=dp-jp-yo-25} value=-0.15076351810436606 ], [ var='C' labels={host=dp-jp-yo-25} value=0 ]} {Instance:host=dp-jp-yo-26 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-26 Value:0xc09129b800} B:{Var:B Labels:host=dp-jp-yo-26 Value:0xc09129b820} C:{Var:C Labels:host=dp-jp-yo-26 Value:0xc09129b7d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432564967s EvaluationString:[ var='A' labels={host=dp-jp-yo-26} value=-0.10351857464056499 ], [ var='B' labels={host=dp-jp-yo-26} value=-0.10351857464056499 ], [ var='C' labels={host=dp-jp-yo-26} value=0 ]} {Instance:host=dp-jp-yo-27 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-27 Value:0xc09129b860} B:{Var:B Labels:host=dp-jp-yo-27 Value:0xc09129b880} C:{Var:C Labels:host=dp-jp-yo-27 Value:0xc09129b8a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432573187s EvaluationString:[ var='A' labels={host=dp-jp-yo-27} value=-0.17210219065293586 ], [ var='B' labels={host=dp-jp-yo-27} value=-0.17210219065293586 ], [ var='C' labels={host=dp-jp-yo-27} value=0 ]} {Instance:host=dp-jp-yo-28 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-28 Value:0xc09129b900} B:{Var:B Labels:host=dp-jp-yo-28 Value:0xc09129b920} C:{Var:C Labels:host=dp-jp-yo-28 Value:0xc09129b940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432584641s EvaluationString:[ var='A' labels={host=dp-jp-yo-28} value=-0.010990326370909591 ], [ var='B' labels={host=dp-jp-yo-28} value=-0.010990326370909591 ], [ var='C' labels={host=dp-jp-yo-28} value=0 ]} {Instance:host=dp-jp-yo-29 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-29 Value:0xc09129b980} B:{Var:B Labels:host=dp-jp-yo-29 Value:0xc09129b9a0} C:{Var:C Labels:host=dp-jp-yo-29 Value:0xc09129b9c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432591819s EvaluationString:[ var='A' labels={host=dp-jp-yo-29} value=0.0027447324283258467 ], [ var='B' labels={host=dp-jp-yo-29} value=0.0027447324283258467 ], [ var='C' labels={host=dp-jp-yo-29} value=0 ]} 
{Instance:host=dp-jp-yo-30 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-30 Value:0xc09129ba30} B:{Var:B Labels:host=dp-jp-yo-30 Value:0xc09129ba50} C:{Var:C Labels:host=dp-jp-yo-30 Value:0xc09129ba80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432600007s EvaluationString:[ var='A' labels={host=dp-jp-yo-30} value=-0.09024334275386943 ], [ var='B' labels={host=dp-jp-yo-30} value=-0.09024334275386943 ], [ var='C' labels={host=dp-jp-yo-30} value=0 ]} {Instance:host=dp-jp-yo-31 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-31 Value:0xc09129bac0} B:{Var:B Labels:host=dp-jp-yo-31 Value:0xc09129baf0} C:{Var:C Labels:host=dp-jp-yo-31 Value:0xc09129bb10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432607467s EvaluationString:[ var='A' labels={host=dp-jp-yo-31} value=-0.2562740281865086 ], [ var='B' labels={host=dp-jp-yo-31} value=-0.2562740281865086 ], [ var='C' labels={host=dp-jp-yo-31} value=0 ]} {Instance:host=dp-jp-yo-32 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-32 Value:0xc09129bb70} B:{Var:B Labels:host=dp-jp-yo-32 Value:0xc09129bb90} C:{Var:C Labels:host=dp-jp-yo-32 Value:0xc09129bbb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432616128s EvaluationString:[ var='A' labels={host=dp-jp-yo-32} value=-0.0404734276790227 ], [ var='B' labels={host=dp-jp-yo-32} value=-0.0404734276790227 ], [ var='C' labels={host=dp-jp-yo-32} value=0 ]} {Instance:host=dp-jp-yo-33 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-33 Value:0xc09129bbf0} B:{Var:B Labels:host=dp-jp-yo-33 Value:0xc09129bc10} C:{Var:C Labels:host=dp-jp-yo-33 Value:0xc09129bc30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432624883s EvaluationString:[ var='A' labels={host=dp-jp-yo-33} value=-0.06889341998312862 ], [ var='B' labels={host=dp-jp-yo-33} value=-0.06889341998312862 ], [ var='C' labels={host=dp-jp-yo-33} value=0 ]} {Instance:host=dp-jp-yo-34 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-34 Value:0xc09129bcc0} B:{Var:B Labels:host=dp-jp-yo-34 Value:0xc09129bc70} C:{Var:C Labels:host=dp-jp-yo-34 Value:0xc09129bc90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43263538s EvaluationString:[ var='A' labels={host=dp-jp-yo-34} value=0.12158414306815266 ], [ var='B' labels={host=dp-jp-yo-34} value=0.12158414306815266 ], [ var='C' labels={host=dp-jp-yo-34} value=0 ]} {Instance:host=dp-jp-yo-35 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-35 Value:0xc09129bd10} B:{Var:B Labels:host=dp-jp-yo-35 Value:0xc09129bd30} C:{Var:C Labels:host=dp-jp-yo-35 Value:0xc09129bd50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432648487s EvaluationString:[ var='A' labels={host=dp-jp-yo-35} value=-0.004571822970163453 ], [ var='B' labels={host=dp-jp-yo-35} value=-0.004571822970163453 ], [ var='C' labels={host=dp-jp-yo-35} value=0 ]} {Instance:host=dp-jp-yo-36 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-jp-yo-36 Value:0xc09129bda0} B:{Var:B Labels:host=dp-jp-yo-36 Value:0xc09129bdc0} C:{Var:C Labels:host=dp-jp-yo-36 Value:0xc09129bde0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432655887s EvaluationString:[ var='A' labels={host=dp-jp-yo-36} value=0.24985934956685243 ], [ var='B' labels={host=dp-jp-yo-36} value=0.24985934956685243 ], [ var='C' labels={host=dp-jp-yo-36} value=0 ]} {Instance:host=dp-lax-us-016 State:Normal 
Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-016 Value:0xc09129be60} B:{Var:B Labels:host=dp-lax-us-016 Value:0xc09129be20} C:{Var:C Labels:host=dp-lax-us-016 Value:0xc09129be40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43266342s EvaluationString:[ var='A' labels={host=dp-lax-us-016} value=-0.06389248463889885 ], [ var='B' labels={host=dp-lax-us-016} value=-0.06389248463889885 ], [ var='C' labels={host=dp-lax-us-016} value=0 ]} {Instance:host=dp-lax-us-017 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-017 Value:0xc09129bea0} B:{Var:B Labels:host=dp-lax-us-017 Value:0xc09129bec0} C:{Var:C Labels:host=dp-lax-us-017 Value:0xc09129bee0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432671107s EvaluationString:[ var='A' labels={host=dp-lax-us-017} value=-0.019073688545667662 ], [ var='B' labels={host=dp-lax-us-017} value=-0.019073688545667662 ], [ var='C' labels={host=dp-lax-us-017} value=0 ]} {Instance:host=dp-lax-us-018 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-018 Value:0xc09129bf40} B:{Var:B Labels:host=dp-lax-us-018 Value:0xc09129bf60} C:{Var:C Labels:host=dp-lax-us-018 Value:0xc09129bf80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432696751s EvaluationString:[ var='A' labels={host=dp-lax-us-018} value=-0.23259825882547602 ], [ var='B' labels={host=dp-lax-us-018} value=-0.23259825882547602 ], [ var='C' labels={host=dp-lax-us-018} value=0 ]} {Instance:host=dp-lax-us-019 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-019 Value:0xc09129bfe0} B:{Var:B Labels:host=dp-lax-us-019 Value:0xc01c6780c0} C:{Var:C Labels:host=dp-lax-us-019 Value:0xc09129bfc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43270526s EvaluationString:[ var='A' labels={host=dp-lax-us-019} value=0.3368765283203423 ], [ var='B' labels={host=dp-lax-us-019} value=0.3368765283203423 ], [ var='C' labels={host=dp-lax-us-019} value=0 ]} {Instance:host=dp-lax-us-020 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-020 Value:0xc01c678190} B:{Var:B Labels:host=dp-lax-us-020 Value:0xc01c6781f0} C:{Var:C Labels:host=dp-lax-us-020 Value:0xc01c678210}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432713269s EvaluationString:[ var='A' labels={host=dp-lax-us-020} value=-0.139072812065861 ], [ var='B' labels={host=dp-lax-us-020} value=-0.139072812065861 ], [ var='C' labels={host=dp-lax-us-020} value=0 ]} {Instance:host=dp-lax-us-021 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-021 Value:0xc01c678260} B:{Var:B Labels:host=dp-lax-us-021 Value:0xc01c678280} C:{Var:C Labels:host=dp-lax-us-021 Value:0xc01c6782b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432722385s EvaluationString:[ var='A' labels={host=dp-lax-us-021} value=-0.1413789942414449 ], [ var='B' labels={host=dp-lax-us-021} value=-0.1413789942414449 ], [ var='C' labels={host=dp-lax-us-021} value=0 ]} {Instance:host=dp-lax-us-022 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-022 Value:0xc01c678330} B:{Var:B Labels:host=dp-lax-us-022 Value:0xc01c6782f0} C:{Var:C Labels:host=dp-lax-us-022 Value:0xc01c678310}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432743124s EvaluationString:[ var='A' labels={host=dp-lax-us-022} value=-0.1590714343162704 ], [ var='B' labels={host=dp-lax-us-022} value=-0.1590714343162704 ], [ var='C' labels={host=dp-lax-us-022} value=0 
]} {Instance:host=dp-lax-us-023 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-023 Value:0xc01c6783a0} B:{Var:B Labels:host=dp-lax-us-023 Value:0xc01c678420} C:{Var:C Labels:host=dp-lax-us-023 Value:0xc01c678440}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432752297s EvaluationString:[ var='A' labels={host=dp-lax-us-023} value=0.2418165724658334 ], [ var='B' labels={host=dp-lax-us-023} value=0.2418165724658334 ], [ var='C' labels={host=dp-lax-us-023} value=0 ]} {Instance:host=dp-lax-us-024 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-024 Value:0xc01c678480} B:{Var:B Labels:host=dp-lax-us-024 Value:0xc01c6784a0} C:{Var:C Labels:host=dp-lax-us-024 Value:0xc01c6784d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432760014s EvaluationString:[ var='A' labels={host=dp-lax-us-024} value=0.040040667803481256 ], [ var='B' labels={host=dp-lax-us-024} value=0.040040667803481256 ], [ var='C' labels={host=dp-lax-us-024} value=0 ]} {Instance:host=dp-lax-us-025 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-025 Value:0xc01c678530} B:{Var:B Labels:host=dp-lax-us-025 Value:0xc01c678560} C:{Var:C Labels:host=dp-lax-us-025 Value:0xc01c678510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432767668s EvaluationString:[ var='A' labels={host=dp-lax-us-025} value=0.04776237845018964 ], [ var='B' labels={host=dp-lax-us-025} value=0.04776237845018964 ], [ var='C' labels={host=dp-lax-us-025} value=0 ]} {Instance:host=dp-lax-us-026 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-026 Value:0xc01c678600} B:{Var:B Labels:host=dp-lax-us-026 Value:0xc01c6785b0} C:{Var:C Labels:host=dp-lax-us-026 Value:0xc01c6785e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432777663s EvaluationString:[ var='A' labels={host=dp-lax-us-026} value=0.0690264531451928 ], [ var='B' labels={host=dp-lax-us-026} value=0.0690264531451928 ], [ var='C' labels={host=dp-lax-us-026} value=0 ]} {Instance:host=dp-lax-us-027 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-027 Value:0xc01c6786c0} B:{Var:B Labels:host=dp-lax-us-027 Value:0xc01c678660} C:{Var:C Labels:host=dp-lax-us-027 Value:0xc01c678690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432785568s EvaluationString:[ var='A' labels={host=dp-lax-us-027} value=-0.035017696476220155 ], [ var='B' labels={host=dp-lax-us-027} value=-0.035017696476220155 ], [ var='C' labels={host=dp-lax-us-027} value=0 ]} {Instance:host=dp-lax-us-028 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-028 Value:0xc01c678740} B:{Var:B Labels:host=dp-lax-us-028 Value:0xc01c678700} C:{Var:C Labels:host=dp-lax-us-028 Value:0xc01c678720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432794033s EvaluationString:[ var='A' labels={host=dp-lax-us-028} value=-0.15849650988620567 ], [ var='B' labels={host=dp-lax-us-028} value=-0.15849650988620567 ], [ var='C' labels={host=dp-lax-us-028} value=0 ]} {Instance:host=dp-lax-us-029 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-029 Value:0xc01c6787b0} B:{Var:B Labels:host=dp-lax-us-029 Value:0xc01c6787d0} C:{Var:C Labels:host=dp-lax-us-029 Value:0xc01c678790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43280193s EvaluationString:[ var='A' labels={host=dp-lax-us-029} value=0.07947998285570179 ], [ var='B' labels={host=dp-lax-us-029} value=0.07947998285570179 ], 
[ var='C' labels={host=dp-lax-us-029} value=0 ]} {Instance:host=dp-lax-us-030 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-030 Value:0xc01c678810} B:{Var:B Labels:host=dp-lax-us-030 Value:0xc01c678840} C:{Var:C Labels:host=dp-lax-us-030 Value:0xc01c678860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432811952s EvaluationString:[ var='A' labels={host=dp-lax-us-030} value=-0.08412023994151716 ], [ var='B' labels={host=dp-lax-us-030} value=-0.08412023994151716 ], [ var='C' labels={host=dp-lax-us-030} value=0 ]} {Instance:host=dp-lax-us-031 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lax-us-031 Value:0xc01c678940} B:{Var:B Labels:host=dp-lax-us-031 Value:0xc01c6788a0} C:{Var:C Labels:host=dp-lax-us-031 Value:0xc01c6788d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432820771s EvaluationString:[ var='A' labels={host=dp-lax-us-031} value=0.21004520770879204 ], [ var='B' labels={host=dp-lax-us-031} value=0.21004520770879204 ], [ var='C' labels={host=dp-lax-us-031} value=0 ]} {Instance:host=dp-lgw-uk-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-001 Value:0xc01c678aa0} B:{Var:B Labels:host=dp-lgw-uk-001 Value:0xc01c678b40} C:{Var:C Labels:host=dp-lgw-uk-001 Value:0xc01c6789f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4328291s EvaluationString:[ var='A' labels={host=dp-lgw-uk-001} value=-0.013995461628013572 ], [ var='B' labels={host=dp-lgw-uk-001} value=-0.013995461628013572 ], [ var='C' labels={host=dp-lgw-uk-001} value=0 ]} {Instance:host=dp-lgw-uk-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-002 Value:0xc01c678b80} B:{Var:B Labels:host=dp-lgw-uk-002 Value:0xc01c678bb0} C:{Var:C Labels:host=dp-lgw-uk-002 Value:0xc01c678bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432848276s EvaluationString:[ var='A' labels={host=dp-lgw-uk-002} value=0.31635272246354873 ], [ var='B' labels={host=dp-lgw-uk-002} value=0.31635272246354873 ], [ var='C' labels={host=dp-lgw-uk-002} value=0 ]} {Instance:host=dp-lgw-uk-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-003 Value:0xc01c678c70} B:{Var:B Labels:host=dp-lgw-uk-003 Value:0xc01c678ca0} C:{Var:C Labels:host=dp-lgw-uk-003 Value:0xc01c678cc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432856293s EvaluationString:[ var='A' labels={host=dp-lgw-uk-003} value=-0.03620755917201753 ], [ var='B' labels={host=dp-lgw-uk-003} value=-0.03620755917201753 ], [ var='C' labels={host=dp-lgw-uk-003} value=0 ]} {Instance:host=dp-lgw-uk-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-004 Value:0xc01c678d00} B:{Var:B Labels:host=dp-lgw-uk-004 Value:0xc01c678d20} C:{Var:C Labels:host=dp-lgw-uk-004 Value:0xc01c678d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432864671s EvaluationString:[ var='A' labels={host=dp-lgw-uk-004} value=0.06423055887810374 ], [ var='B' labels={host=dp-lgw-uk-004} value=0.06423055887810374 ], [ var='C' labels={host=dp-lgw-uk-004} value=0 ]} {Instance:host=dp-lgw-uk-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-005 Value:0xc01c678da0} B:{Var:B Labels:host=dp-lgw-uk-005 Value:0xc01c678df0} C:{Var:C Labels:host=dp-lgw-uk-005 Value:0xc01c678e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432873059s EvaluationString:[ var='A' labels={host=dp-lgw-uk-005} value=-0.09328314027793486 ], [ var='B' 
labels={host=dp-lgw-uk-005} value=-0.09328314027793486 ], [ var='C' labels={host=dp-lgw-uk-005} value=0 ]} {Instance:host=dp-lgw-uk-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-006 Value:0xc01c678e80} B:{Var:B Labels:host=dp-lgw-uk-006 Value:0xc01c678ec0} C:{Var:C Labels:host=dp-lgw-uk-006 Value:0xc01c678f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432880567s EvaluationString:[ var='A' labels={host=dp-lgw-uk-006} value=0.03597289461636264 ], [ var='B' labels={host=dp-lgw-uk-006} value=0.03597289461636264 ], [ var='C' labels={host=dp-lgw-uk-006} value=0 ]} {Instance:host=dp-lgw-uk-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-007 Value:0xc01c678fa0} B:{Var:B Labels:host=dp-lgw-uk-007 Value:0xc01c678f50} C:{Var:C Labels:host=dp-lgw-uk-007 Value:0xc01c678f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432889082s EvaluationString:[ var='A' labels={host=dp-lgw-uk-007} value=-0.0652961216664868 ], [ var='B' labels={host=dp-lgw-uk-007} value=-0.0652961216664868 ], [ var='C' labels={host=dp-lgw-uk-007} value=0 ]} {Instance:host=dp-lgw-uk-008 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-008 Value:0xc01c6791c0} B:{Var:B Labels:host=dp-lgw-uk-008 Value:0xc01c679280} C:{Var:C Labels:host=dp-lgw-uk-008 Value:0xc01c6792b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432896251s EvaluationString:[ var='A' labels={host=dp-lgw-uk-008} value=-0.007745196511930175 ], [ var='B' labels={host=dp-lgw-uk-008} value=-0.007745196511930175 ], [ var='C' labels={host=dp-lgw-uk-008} value=0 ]} {Instance:host=dp-lgw-uk-009 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-009 Value:0xc01c679300} B:{Var:B Labels:host=dp-lgw-uk-009 Value:0xc01c679370} C:{Var:C Labels:host=dp-lgw-uk-009 Value:0xc01c679580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432904615s EvaluationString:[ var='A' labels={host=dp-lgw-uk-009} value=0.04627973331181323 ], [ var='B' labels={host=dp-lgw-uk-009} value=0.04627973331181323 ], [ var='C' labels={host=dp-lgw-uk-009} value=0 ]} {Instance:host=dp-lgw-uk-010 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-010 Value:0xc01c679890} B:{Var:B Labels:host=dp-lgw-uk-010 Value:0xc01c6799e0} C:{Var:C Labels:host=dp-lgw-uk-010 Value:0xc01c679a90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43291301s EvaluationString:[ var='A' labels={host=dp-lgw-uk-010} value=-0.040919903213888026 ], [ var='B' labels={host=dp-lgw-uk-010} value=-0.040919903213888026 ], [ var='C' labels={host=dp-lgw-uk-010} value=0 ]} {Instance:host=dp-lgw-uk-011 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-011 Value:0xc01c679c80} B:{Var:B Labels:host=dp-lgw-uk-011 Value:0xc01c679b30} C:{Var:C Labels:host=dp-lgw-uk-011 Value:0xc01c679c00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432921351s EvaluationString:[ var='A' labels={host=dp-lgw-uk-011} value=-0.19666240350386963 ], [ var='B' labels={host=dp-lgw-uk-011} value=-0.19666240350386963 ], [ var='C' labels={host=dp-lgw-uk-011} value=0 ]} {Instance:host=dp-lgw-uk-012 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-012 Value:0xc01c679df0} B:{Var:B Labels:host=dp-lgw-uk-012 Value:0xc01c679e30} C:{Var:C Labels:host=dp-lgw-uk-012 Value:0xc01c679dc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432929443s EvaluationString:[ var='A' 
labels={host=dp-lgw-uk-012} value=-0.13748931197609085 ], [ var='B' labels={host=dp-lgw-uk-012} value=-0.13748931197609085 ], [ var='C' labels={host=dp-lgw-uk-012} value=0 ]} {Instance:host=dp-lgw-uk-013 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-013 Value:0xc01c679e80} B:{Var:B Labels:host=dp-lgw-uk-013 Value:0xc01c679ea0} C:{Var:C Labels:host=dp-lgw-uk-013 Value:0xc01c679ef0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432938068s EvaluationString:[ var='A' labels={host=dp-lgw-uk-013} value=-0.10014724622874857 ], [ var='B' labels={host=dp-lgw-uk-013} value=-0.10014724622874857 ], [ var='C' labels={host=dp-lgw-uk-013} value=0 ]} {Instance:host=dp-lgw-uk-014 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-014 Value:0xc0223c0000} B:{Var:B Labels:host=dp-lgw-uk-014 Value:0xc01c679f60} C:{Var:C Labels:host=dp-lgw-uk-014 Value:0xc01c679f80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432946255s EvaluationString:[ var='A' labels={host=dp-lgw-uk-014} value=0.08506352220633419 ], [ var='B' labels={host=dp-lgw-uk-014} value=0.08506352220633419 ], [ var='C' labels={host=dp-lgw-uk-014} value=0 ]} {Instance:host=dp-lgw-uk-015 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-lgw-uk-015 Value:0xc0223c0080} B:{Var:B Labels:host=dp-lgw-uk-015 Value:0xc0223c00b0} C:{Var:C Labels:host=dp-lgw-uk-015 Value:0xc0223c0050}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432954017s EvaluationString:[ var='A' labels={host=dp-lgw-uk-015} value=-0.14910484571603416 ], [ var='B' labels={host=dp-lgw-uk-015} value=-0.14910484571603416 ], [ var='C' labels={host=dp-lgw-uk-015} value=0 ]} {Instance:host=dp-mad-es-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mad-es-001 Value:0xc0223c01a0} B:{Var:B Labels:host=dp-mad-es-001 Value:0xc0223c01d0} C:{Var:C Labels:host=dp-mad-es-001 Value:0xc0223c0180}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432962138s EvaluationString:[ var='A' labels={host=dp-mad-es-001} value=-0.01508774916400076 ], [ var='B' labels={host=dp-mad-es-001} value=-0.01508774916400076 ], [ var='C' labels={host=dp-mad-es-001} value=0 ]} {Instance:host=dp-mad-es-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mad-es-002 Value:0xc0223c0230} B:{Var:B Labels:host=dp-mad-es-002 Value:0xc0223c0250} C:{Var:C Labels:host=dp-mad-es-002 Value:0xc0223c0290}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432970651s EvaluationString:[ var='A' labels={host=dp-mad-es-002} value=-0.002461445482952383 ], [ var='B' labels={host=dp-mad-es-002} value=-0.002461445482952383 ], [ var='C' labels={host=dp-mad-es-002} value=0 ]} {Instance:host=dp-mad-es-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mad-es-003 Value:0xc0223c02e0} B:{Var:B Labels:host=dp-mad-es-003 Value:0xc0223c0310} C:{Var:C Labels:host=dp-mad-es-003 Value:0xc0223c0380}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432977995s EvaluationString:[ var='A' labels={host=dp-mad-es-003} value=0.20360640078241232 ], [ var='B' labels={host=dp-mad-es-003} value=0.20360640078241232 ], [ var='C' labels={host=dp-mad-es-003} value=0 ]} {Instance:host=dp-mad-es-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mad-es-004 Value:0xc0223c0420} B:{Var:B Labels:host=dp-mad-es-004 Value:0xc0223c0440} C:{Var:C Labels:host=dp-mad-es-004 Value:0xc0223c0460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.432985733s EvaluationString:[ var='A' labels={host=dp-mad-es-004} value=0.3510837399338129 ], [ var='B' labels={host=dp-mad-es-004} value=0.3510837399338129 ], [ var='C' labels={host=dp-mad-es-004} value=0 ]} {Instance:host=dp-mia-us-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-001 Value:0xc0223c05e0} B:{Var:B Labels:host=dp-mia-us-001 Value:0xc0223c0510} C:{Var:C Labels:host=dp-mia-us-001 Value:0xc0223c0560}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432993211s EvaluationString:[ var='A' labels={host=dp-mia-us-001} value=-0.027767825487951352 ], [ var='B' labels={host=dp-mia-us-001} value=-0.027767825487951352 ], [ var='C' labels={host=dp-mia-us-001} value=0 ]} {Instance:host=dp-mia-us-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-002 Value:0xc0223c0630} B:{Var:B Labels:host=dp-mia-us-002 Value:0xc0223c0660} C:{Var:C Labels:host=dp-mia-us-002 Value:0xc0223c0680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433001138s EvaluationString:[ var='A' labels={host=dp-mia-us-002} value=-0.018287384191507293 ], [ var='B' labels={host=dp-mia-us-002} value=-0.018287384191507293 ], [ var='C' labels={host=dp-mia-us-002} value=0 ]} {Instance:host=dp-mia-us-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-003 Value:0xc0223c06f0} B:{Var:B Labels:host=dp-mia-us-003 Value:0xc0223c0720} C:{Var:C Labels:host=dp-mia-us-003 Value:0xc0223c0780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433008898s EvaluationString:[ var='A' labels={host=dp-mia-us-003} value=-0.15532546199070296 ], [ var='B' labels={host=dp-mia-us-003} value=-0.15532546199070296 ], [ var='C' labels={host=dp-mia-us-003} value=0 ]} {Instance:host=dp-mia-us-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-004 Value:0xc0223c0870} B:{Var:B Labels:host=dp-mia-us-004 Value:0xc0223c0890} C:{Var:C Labels:host=dp-mia-us-004 Value:0xc0223c0800}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433017556s EvaluationString:[ var='A' labels={host=dp-mia-us-004} value=-0.21800189221713773 ], [ var='B' labels={host=dp-mia-us-004} value=-0.21800189221713773 ], [ var='C' labels={host=dp-mia-us-004} value=0 ]} {Instance:host=dp-mia-us-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-005 Value:0xc0223c0900} B:{Var:B Labels:host=dp-mia-us-005 Value:0xc0223c0930} C:{Var:C Labels:host=dp-mia-us-005 Value:0xc0223c0950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433025195s EvaluationString:[ var='A' labels={host=dp-mia-us-005} value=-0.1483446277003253 ], [ var='B' labels={host=dp-mia-us-005} value=-0.1483446277003253 ], [ var='C' labels={host=dp-mia-us-005} value=0 ]} {Instance:host=dp-mia-us-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-006 Value:0xc0223c09f0} B:{Var:B Labels:host=dp-mia-us-006 Value:0xc0223c0a10} C:{Var:C Labels:host=dp-mia-us-006 Value:0xc0223c09c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433032875s EvaluationString:[ var='A' labels={host=dp-mia-us-006} value=0.15294527282636394 ], [ var='B' labels={host=dp-mia-us-006} value=0.15294527282636394 ], [ var='C' labels={host=dp-mia-us-006} value=0 ]} {Instance:host=dp-mia-us-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-007 Value:0xc0223c0a80} B:{Var:B Labels:host=dp-mia-us-007 Value:0xc0223c0ab0} C:{Var:C Labels:host=dp-mia-us-007 
Value:0xc0223c0ad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433040313s EvaluationString:[ var='A' labels={host=dp-mia-us-007} value=-0.12376611914440616 ], [ var='B' labels={host=dp-mia-us-007} value=-0.12376611914440616 ], [ var='C' labels={host=dp-mia-us-007} value=0 ]} {Instance:host=dp-mia-us-008 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-008 Value:0xc0223c0b70} B:{Var:B Labels:host=dp-mia-us-008 Value:0xc0223c0b30} C:{Var:C Labels:host=dp-mia-us-008 Value:0xc0223c0b50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433047886s EvaluationString:[ var='A' labels={host=dp-mia-us-008} value=-0.23414896452510883 ], [ var='B' labels={host=dp-mia-us-008} value=-0.23414896452510883 ], [ var='C' labels={host=dp-mia-us-008} value=0 ]} {Instance:host=dp-mia-us-009 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-009 Value:0xc0223c0bb0} B:{Var:B Labels:host=dp-mia-us-009 Value:0xc0223c0bd0} C:{Var:C Labels:host=dp-mia-us-009 Value:0xc0223c0bf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433056898s EvaluationString:[ var='A' labels={host=dp-mia-us-009} value=-0.3302742340105643 ], [ var='B' labels={host=dp-mia-us-009} value=-0.3302742340105643 ], [ var='C' labels={host=dp-mia-us-009} value=0 ]} {Instance:host=dp-mia-us-010 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-010 Value:0xc0223c0c30} B:{Var:B Labels:host=dp-mia-us-010 Value:0xc0223c0c50} C:{Var:C Labels:host=dp-mia-us-010 Value:0xc0223c0c80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433064577s EvaluationString:[ var='A' labels={host=dp-mia-us-010} value=-0.22304443955465086 ], [ var='B' labels={host=dp-mia-us-010} value=-0.22304443955465086 ], [ var='C' labels={host=dp-mia-us-010} value=0 ]} {Instance:host=dp-mia-us-011 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-011 Value:0xc0223c0cc0} B:{Var:B Labels:host=dp-mia-us-011 Value:0xc0223c0cf0} C:{Var:C Labels:host=dp-mia-us-011 Value:0xc0223c0d10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433072369s EvaluationString:[ var='A' labels={host=dp-mia-us-011} value=0.05836273910540107 ], [ var='B' labels={host=dp-mia-us-011} value=0.05836273910540107 ], [ var='C' labels={host=dp-mia-us-011} value=0 ]} {Instance:host=dp-mia-us-012 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-012 Value:0xc0223c0d50} B:{Var:B Labels:host=dp-mia-us-012 Value:0xc0223c0d70} C:{Var:C Labels:host=dp-mia-us-012 Value:0xc0223c0d90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433121659s EvaluationString:[ var='A' labels={host=dp-mia-us-012} value=0.7070388193959332 ], [ var='B' labels={host=dp-mia-us-012} value=0.7070388193959332 ], [ var='C' labels={host=dp-mia-us-012} value=0 ]} {Instance:host=dp-mia-us-013 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-013 Value:0xc0223c0de0} B:{Var:B Labels:host=dp-mia-us-013 Value:0xc0223c0e00} C:{Var:C Labels:host=dp-mia-us-013 Value:0xc0223c0e20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4331305s EvaluationString:[ var='A' labels={host=dp-mia-us-013} value=-0.02070577056720424 ], [ var='B' labels={host=dp-mia-us-013} value=-0.02070577056720424 ], [ var='C' labels={host=dp-mia-us-013} value=0 ]} {Instance:host=dp-mia-us-014 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-014 Value:0xc0223c0e60} B:{Var:B Labels:host=dp-mia-us-014 
Value:0xc0223c0e80} C:{Var:C Labels:host=dp-mia-us-014 Value:0xc0223c0ea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43313924s EvaluationString:[ var='A' labels={host=dp-mia-us-014} value=0.009295106410583287 ], [ var='B' labels={host=dp-mia-us-014} value=0.009295106410583287 ], [ var='C' labels={host=dp-mia-us-014} value=0 ]} {Instance:host=dp-mia-us-015 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-015 Value:0xc0223c0f30} B:{Var:B Labels:host=dp-mia-us-015 Value:0xc0223c0ee0} C:{Var:C Labels:host=dp-mia-us-015 Value:0xc0223c0f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433147254s EvaluationString:[ var='A' labels={host=dp-mia-us-015} value=-0.12577232033805785 ], [ var='B' labels={host=dp-mia-us-015} value=-0.12577232033805785 ], [ var='C' labels={host=dp-mia-us-015} value=0 ]} {Instance:host=dp-mia-us-016 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-016 Value:0xc0223c0f90} B:{Var:B Labels:host=dp-mia-us-016 Value:0xc0223c0fc0} C:{Var:C Labels:host=dp-mia-us-016 Value:0xc0223c0ff0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433156435s EvaluationString:[ var='A' labels={host=dp-mia-us-016} value=0.07683876116940626 ], [ var='B' labels={host=dp-mia-us-016} value=0.07683876116940626 ], [ var='C' labels={host=dp-mia-us-016} value=0 ]} {Instance:host=dp-mia-us-017 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mia-us-017 Value:0xc0223c1030} B:{Var:B Labels:host=dp-mia-us-017 Value:0xc0223c1080} C:{Var:C Labels:host=dp-mia-us-017 Value:0xc0223c10c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433164236s EvaluationString:[ var='A' labels={host=dp-mia-us-017} value=0.297996533261856 ], [ var='B' labels={host=dp-mia-us-017} value=0.297996533261856 ], [ var='C' labels={host=dp-mia-us-017} value=0 ]} {Instance:host=dp-mrs-fr-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mrs-fr-001 Value:0xc0223c1120} B:{Var:B Labels:host=dp-mrs-fr-001 Value:0xc0223c1140} C:{Var:C Labels:host=dp-mrs-fr-001 Value:0xc0223c1100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433173777s EvaluationString:[ var='A' labels={host=dp-mrs-fr-001} value=-0.00575639666448881 ], [ var='B' labels={host=dp-mrs-fr-001} value=-0.00575639666448881 ], [ var='C' labels={host=dp-mrs-fr-001} value=0 ]} {Instance:host=dp-mrs-fr-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mrs-fr-002 Value:0xc0223c1180} B:{Var:B Labels:host=dp-mrs-fr-002 Value:0xc0223c11b0} C:{Var:C Labels:host=dp-mrs-fr-002 Value:0xc0223c11e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433183387s EvaluationString:[ var='A' labels={host=dp-mrs-fr-002} value=-0.211041094298281 ], [ var='B' labels={host=dp-mrs-fr-002} value=-0.211041094298281 ], [ var='C' labels={host=dp-mrs-fr-002} value=0 ]} {Instance:host=dp-mrs-fr-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mrs-fr-003 Value:0xc0223c1250} B:{Var:B Labels:host=dp-mrs-fr-003 Value:0xc0223c12d0} C:{Var:C Labels:host=dp-mrs-fr-003 Value:0xc0223c12f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433191085s EvaluationString:[ var='A' labels={host=dp-mrs-fr-003} value=-0.042371302284654654 ], [ var='B' labels={host=dp-mrs-fr-003} value=-0.042371302284654654 ], [ var='C' labels={host=dp-mrs-fr-003} value=0 ]} {Instance:host=dp-mrs-fr-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mrs-fr-004 
Value:0xc0223c13b0} B:{Var:B Labels:host=dp-mrs-fr-004 Value:0xc0223c1400} C:{Var:C Labels:host=dp-mrs-fr-004 Value:0xc0223c1350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433199297s EvaluationString:[ var='A' labels={host=dp-mrs-fr-004} value=0.09639982702317695 ], [ var='B' labels={host=dp-mrs-fr-004} value=0.09639982702317695 ], [ var='C' labels={host=dp-mrs-fr-004} value=0 ]} {Instance:host=dp-mrs-fr-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mrs-fr-005 Value:0xc0223c1480} B:{Var:B Labels:host=dp-mrs-fr-005 Value:0xc0223c14a0} C:{Var:C Labels:host=dp-mrs-fr-005 Value:0xc0223c1450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433206998s EvaluationString:[ var='A' labels={host=dp-mrs-fr-005} value=-0.08709696776788535 ], [ var='B' labels={host=dp-mrs-fr-005} value=-0.08709696776788535 ], [ var='C' labels={host=dp-mrs-fr-005} value=0 ]} {Instance:host=dp-mxp-it-053 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mxp-it-053 Value:0xc0223c14f0} B:{Var:B Labels:host=dp-mxp-it-053 Value:0xc0223c1510} C:{Var:C Labels:host=dp-mxp-it-053 Value:0xc0223c1580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433215181s EvaluationString:[ var='A' labels={host=dp-mxp-it-053} value=0.17659896549164145 ], [ var='B' labels={host=dp-mxp-it-053} value=0.17659896549164145 ], [ var='C' labels={host=dp-mxp-it-053} value=0 ]} {Instance:host=dp-mxp-it-054 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mxp-it-054 Value:0xc0223c1640} B:{Var:B Labels:host=dp-mxp-it-054 Value:0xc0223c1670} C:{Var:C Labels:host=dp-mxp-it-054 Value:0xc0223c1610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433222789s EvaluationString:[ var='A' labels={host=dp-mxp-it-054} value=0.1828762829183006 ], [ var='B' labels={host=dp-mxp-it-054} value=0.1828762829183006 ], [ var='C' labels={host=dp-mxp-it-054} value=0 ]} {Instance:host=dp-mxp-it-055 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mxp-it-055 Value:0xc0223c1700} B:{Var:B Labels:host=dp-mxp-it-055 Value:0xc0223c16c0} C:{Var:C Labels:host=dp-mxp-it-055 Value:0xc0223c16e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433231493s EvaluationString:[ var='A' labels={host=dp-mxp-it-055} value=0.17271142472019108 ], [ var='B' labels={host=dp-mxp-it-055} value=0.17271142472019108 ], [ var='C' labels={host=dp-mxp-it-055} value=0 ]} {Instance:host=dp-mxp-it-056 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mxp-it-056 Value:0xc0223c1790} B:{Var:B Labels:host=dp-mxp-it-056 Value:0xc0223c17b0} C:{Var:C Labels:host=dp-mxp-it-056 Value:0xc0223c17d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433239161s EvaluationString:[ var='A' labels={host=dp-mxp-it-056} value=-0.27808762784410135 ], [ var='B' labels={host=dp-mxp-it-056} value=-0.27808762784410135 ], [ var='C' labels={host=dp-mxp-it-056} value=0 ]} {Instance:host=dp-mxp-it-057 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-mxp-it-057 Value:0xc0223c1830} B:{Var:B Labels:host=dp-mxp-it-057 Value:0xc0223c1850} C:{Var:C Labels:host=dp-mxp-it-057 Value:0xc0223c1870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433246716s EvaluationString:[ var='A' labels={host=dp-mxp-it-057} value=-0.22505006179999992 ], [ var='B' labels={host=dp-mxp-it-057} value=-0.22505006179999992 ], [ var='C' labels={host=dp-mxp-it-057} value=0 ]} {Instance:host=dp-mxp-it-058 State:Normal Error: 
Results:map[] Values:map[A:{Var:A Labels:host=dp-mxp-it-058 Value:0xc0223c1920} B:{Var:B Labels:host=dp-mxp-it-058 Value:0xc0223c18c0} C:{Var:C Labels:host=dp-mxp-it-058 Value:0xc0223c18e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433254813s EvaluationString:[ var='A' labels={host=dp-mxp-it-058} value=-0.18437736555384276 ], [ var='B' labels={host=dp-mxp-it-058} value=-0.18437736555384276 ], [ var='C' labels={host=dp-mxp-it-058} value=0 ]} {Instance:host=dp-ord-us-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-001 Value:0xc0223c1980} B:{Var:B Labels:host=dp-ord-us-001 Value:0xc0223c19a0} C:{Var:C Labels:host=dp-ord-us-001 Value:0xc0223c19c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433263318s EvaluationString:[ var='A' labels={host=dp-ord-us-001} value=-0.005267291065536028 ], [ var='B' labels={host=dp-ord-us-001} value=-0.005267291065536028 ], [ var='C' labels={host=dp-ord-us-001} value=0 ]} {Instance:host=dp-ord-us-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-002 Value:0xc0223c1a80} B:{Var:B Labels:host=dp-ord-us-002 Value:0xc0223c1a30} C:{Var:C Labels:host=dp-ord-us-002 Value:0xc0223c1a50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433271799s EvaluationString:[ var='A' labels={host=dp-ord-us-002} value=0.22928516614740602 ], [ var='B' labels={host=dp-ord-us-002} value=0.22928516614740602 ], [ var='C' labels={host=dp-ord-us-002} value=0 ]} {Instance:host=dp-ord-us-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-003 Value:0xc0223c1ad0} B:{Var:B Labels:host=dp-ord-us-003 Value:0xc0223c1af0} C:{Var:C Labels:host=dp-ord-us-003 Value:0xc0223c1b20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433279221s EvaluationString:[ var='A' labels={host=dp-ord-us-003} value=-0.036627929491099095 ], [ var='B' labels={host=dp-ord-us-003} value=-0.036627929491099095 ], [ var='C' labels={host=dp-ord-us-003} value=0 ]} {Instance:host=dp-ord-us-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-004 Value:0xc0223c1b80} B:{Var:B Labels:host=dp-ord-us-004 Value:0xc0223c1ba0} C:{Var:C Labels:host=dp-ord-us-004 Value:0xc0223c1bd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433287177s EvaluationString:[ var='A' labels={host=dp-ord-us-004} value=-0.23328020358971932 ], [ var='B' labels={host=dp-ord-us-004} value=-0.23328020358971932 ], [ var='C' labels={host=dp-ord-us-004} value=0 ]} {Instance:host=dp-ord-us-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-005 Value:0xc0223c1c50} B:{Var:B Labels:host=dp-ord-us-005 Value:0xc0223c1c70} C:{Var:C Labels:host=dp-ord-us-005 Value:0xc0223c1ca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433295103s EvaluationString:[ var='A' labels={host=dp-ord-us-005} value=0.04700761549285678 ], [ var='B' labels={host=dp-ord-us-005} value=0.04700761549285678 ], [ var='C' labels={host=dp-ord-us-005} value=0 ]} {Instance:host=dp-ord-us-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-006 Value:0xc0223c1d30} B:{Var:B Labels:host=dp-ord-us-006 Value:0xc0223c1d50} C:{Var:C Labels:host=dp-ord-us-006 Value:0xc0223c1d00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433304428s EvaluationString:[ var='A' labels={host=dp-ord-us-006} value=0.10504395863074596 ], [ var='B' labels={host=dp-ord-us-006} value=0.10504395863074596 ], [ var='C' labels={host=dp-ord-us-006} 
value=0 ]} {Instance:host=dp-ord-us-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-007 Value:0xc0223c1e00} B:{Var:B Labels:host=dp-ord-us-007 Value:0xc0223c1db0} C:{Var:C Labels:host=dp-ord-us-007 Value:0xc0223c1dd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433312257s EvaluationString:[ var='A' labels={host=dp-ord-us-007} value=-0.06641935453559869 ], [ var='B' labels={host=dp-ord-us-007} value=-0.06641935453559869 ], [ var='C' labels={host=dp-ord-us-007} value=0 ]} {Instance:host=dp-ord-us-008 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-008 Value:0xc0223c1e50} B:{Var:B Labels:host=dp-ord-us-008 Value:0xc0223c1e70} C:{Var:C Labels:host=dp-ord-us-008 Value:0xc0223c1ea0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433320618s EvaluationString:[ var='A' labels={host=dp-ord-us-008} value=-0.043804294726991586 ], [ var='B' labels={host=dp-ord-us-008} value=-0.043804294726991586 ], [ var='C' labels={host=dp-ord-us-008} value=0 ]} {Instance:host=dp-ord-us-009 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-009 Value:0xc0223c1ef0} B:{Var:B Labels:host=dp-ord-us-009 Value:0xc0223c1f40} C:{Var:C Labels:host=dp-ord-us-009 Value:0xc0223c1f60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433329274s EvaluationString:[ var='A' labels={host=dp-ord-us-009} value=-0.043252307344256266 ], [ var='B' labels={host=dp-ord-us-009} value=-0.043252307344256266 ], [ var='C' labels={host=dp-ord-us-009} value=0 ]} {Instance:host=dp-ord-us-010 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-010 Value:0xc0223c1fb0} B:{Var:B Labels:host=dp-ord-us-010 Value:0xc0223c1fe0} C:{Var:C Labels:host=dp-ord-us-010 Value:0xc03ceac000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433337207s EvaluationString:[ var='A' labels={host=dp-ord-us-010} value=0.0769783267926035 ], [ var='B' labels={host=dp-ord-us-010} value=0.0769783267926035 ], [ var='C' labels={host=dp-ord-us-010} value=0 ]} {Instance:host=dp-ord-us-011 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-011 Value:0xc03ceac080} B:{Var:B Labels:host=dp-ord-us-011 Value:0xc03ceac0a0} C:{Var:C Labels:host=dp-ord-us-011 Value:0xc03ceac110}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433344887s EvaluationString:[ var='A' labels={host=dp-ord-us-011} value=-0.2591243515889699 ], [ var='B' labels={host=dp-ord-us-011} value=-0.2591243515889699 ], [ var='C' labels={host=dp-ord-us-011} value=0 ]} {Instance:host=dp-ord-us-012 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-012 Value:0xc03ceac160} B:{Var:B Labels:host=dp-ord-us-012 Value:0xc03ceac180} C:{Var:C Labels:host=dp-ord-us-012 Value:0xc03ceac1b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433352842s EvaluationString:[ var='A' labels={host=dp-ord-us-012} value=-0.09483180273502256 ], [ var='B' labels={host=dp-ord-us-012} value=-0.09483180273502256 ], [ var='C' labels={host=dp-ord-us-012} value=0 ]} {Instance:host=dp-ord-us-013 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-ord-us-013 Value:0xc03ceac220} B:{Var:B Labels:host=dp-ord-us-013 Value:0xc03ceac250} C:{Var:C Labels:host=dp-ord-us-013 Value:0xc03ceac290}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433360867s EvaluationString:[ var='A' labels={host=dp-ord-us-013} value=-0.12194625017039179 ], [ var='B' labels={host=dp-ord-us-013} 
value=-0.12194625017039179 ], [ var='C' labels={host=dp-ord-us-013} value=0 ]} {Instance:host=dp-prg-cz-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-prg-cz-001 Value:0xc03ceac310} B:{Var:B Labels:host=dp-prg-cz-001 Value:0xc03ceac330} C:{Var:C Labels:host=dp-prg-cz-001 Value:0xc03ceac350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433368487s EvaluationString:[ var='A' labels={host=dp-prg-cz-001} value=-0.13915273167733488 ], [ var='B' labels={host=dp-prg-cz-001} value=-0.13915273167733488 ], [ var='C' labels={host=dp-prg-cz-001} value=0 ]} {Instance:host=dp-prg-cz-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-prg-cz-002 Value:0xc03ceac410} B:{Var:B Labels:host=dp-prg-cz-002 Value:0xc03ceac430} C:{Var:C Labels:host=dp-prg-cz-002 Value:0xc03ceac3e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433378187s EvaluationString:[ var='A' labels={host=dp-prg-cz-002} value=-0.11112279900811158 ], [ var='B' labels={host=dp-prg-cz-002} value=-0.11112279900811158 ], [ var='C' labels={host=dp-prg-cz-002} value=0 ]} {Instance:host=dp-sea-us-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-002 Value:0xc03ceac500} B:{Var:B Labels:host=dp-sea-us-002 Value:0xc03ceac490} C:{Var:C Labels:host=dp-sea-us-002 Value:0xc03ceac4d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433386056s EvaluationString:[ var='A' labels={host=dp-sea-us-002} value=-0.15952319228126735 ], [ var='B' labels={host=dp-sea-us-002} value=-0.15952319228126735 ], [ var='C' labels={host=dp-sea-us-002} value=0 ]} {Instance:host=dp-sea-us-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-003 Value:0xc03ceac5c0} B:{Var:B Labels:host=dp-sea-us-003 Value:0xc03ceac560} C:{Var:C Labels:host=dp-sea-us-003 Value:0xc03ceac590}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43339326s EvaluationString:[ var='A' labels={host=dp-sea-us-003} value=-0.16138674961759136 ], [ var='B' labels={host=dp-sea-us-003} value=-0.16138674961759136 ], [ var='C' labels={host=dp-sea-us-003} value=0 ]} {Instance:host=dp-sea-us-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-004 Value:0xc03ceac660} B:{Var:B Labels:host=dp-sea-us-004 Value:0xc03ceac690} C:{Var:C Labels:host=dp-sea-us-004 Value:0xc03ceac6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433400874s EvaluationString:[ var='A' labels={host=dp-sea-us-004} value=0.01788346502117122 ], [ var='B' labels={host=dp-sea-us-004} value=0.01788346502117122 ], [ var='C' labels={host=dp-sea-us-004} value=0 ]} {Instance:host=dp-sea-us-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-005 Value:0xc03ceac700} B:{Var:B Labels:host=dp-sea-us-005 Value:0xc03ceac750} C:{Var:C Labels:host=dp-sea-us-005 Value:0xc03ceac780}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433409345s EvaluationString:[ var='A' labels={host=dp-sea-us-005} value=-0.08068831010615796 ], [ var='B' labels={host=dp-sea-us-005} value=-0.08068831010615796 ], [ var='C' labels={host=dp-sea-us-005} value=0 ]} {Instance:host=dp-sea-us-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-006 Value:0xc03ceac860} B:{Var:B Labels:host=dp-sea-us-006 Value:0xc03ceac7f0} C:{Var:C Labels:host=dp-sea-us-006 Value:0xc03ceac820}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433417345s EvaluationString:[ var='A' labels={host=dp-sea-us-006} 
value=0.40615586938120685 ], [ var='B' labels={host=dp-sea-us-006} value=0.40615586938120685 ], [ var='C' labels={host=dp-sea-us-006} value=0 ]} {Instance:host=dp-sea-us-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-007 Value:0xc03ceac910} B:{Var:B Labels:host=dp-sea-us-007 Value:0xc03ceac8b0} C:{Var:C Labels:host=dp-sea-us-007 Value:0xc03ceac8e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433425377s EvaluationString:[ var='A' labels={host=dp-sea-us-007} value=0.2005139072733468 ], [ var='B' labels={host=dp-sea-us-007} value=0.2005139072733468 ], [ var='C' labels={host=dp-sea-us-007} value=0 ]} {Instance:host=dp-sea-us-008 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-008 Value:0xc03ceaca10} B:{Var:B Labels:host=dp-sea-us-008 Value:0xc03ceac970} C:{Var:C Labels:host=dp-sea-us-008 Value:0xc03ceac9e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43343353s EvaluationString:[ var='A' labels={host=dp-sea-us-008} value=-0.018572513427592696 ], [ var='B' labels={host=dp-sea-us-008} value=-0.018572513427592696 ], [ var='C' labels={host=dp-sea-us-008} value=0 ]} {Instance:host=dp-sea-us-009 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-009 Value:0xc03ceaca70} B:{Var:B Labels:host=dp-sea-us-009 Value:0xc03ceacaa0} C:{Var:C Labels:host=dp-sea-us-009 Value:0xc03ceacad0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433441071s EvaluationString:[ var='A' labels={host=dp-sea-us-009} value=-0.15180086230547118 ], [ var='B' labels={host=dp-sea-us-009} value=-0.15180086230547118 ], [ var='C' labels={host=dp-sea-us-009} value=0 ]} {Instance:host=dp-sea-us-010 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-010 Value:0xc03ceacb40} B:{Var:B Labels:host=dp-sea-us-010 Value:0xc03ceacb80} C:{Var:C Labels:host=dp-sea-us-010 Value:0xc03ceacbb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433448575s EvaluationString:[ var='A' labels={host=dp-sea-us-010} value=-0.3673020500384183 ], [ var='B' labels={host=dp-sea-us-010} value=-0.3673020500384183 ], [ var='C' labels={host=dp-sea-us-010} value=0 ]} {Instance:host=dp-sea-us-011 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-011 Value:0xc03ceacc10} B:{Var:B Labels:host=dp-sea-us-011 Value:0xc03ceacc40} C:{Var:C Labels:host=dp-sea-us-011 Value:0xc03ceacc60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433455811s EvaluationString:[ var='A' labels={host=dp-sea-us-011} value=-0.17347389121842305 ], [ var='B' labels={host=dp-sea-us-011} value=-0.17347389121842305 ], [ var='C' labels={host=dp-sea-us-011} value=0 ]} {Instance:host=dp-sea-us-012 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-012 Value:0xc03ceaccd0} B:{Var:B Labels:host=dp-sea-us-012 Value:0xc03ceacd00} C:{Var:C Labels:host=dp-sea-us-012 Value:0xc03ceacd40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433463611s EvaluationString:[ var='A' labels={host=dp-sea-us-012} value=-0.029025087299804193 ], [ var='B' labels={host=dp-sea-us-012} value=-0.029025087299804193 ], [ var='C' labels={host=dp-sea-us-012} value=0 ]} {Instance:host=dp-sea-us-013 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-013 Value:0xc03ceacdf0} B:{Var:B Labels:host=dp-sea-us-013 Value:0xc03ceace10} C:{Var:C Labels:host=dp-sea-us-013 Value:0xc03ceacdd0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43347107s 
EvaluationString:[ var='A' labels={host=dp-sea-us-013} value=-0.010275558127221172 ], [ var='B' labels={host=dp-sea-us-013} value=-0.010275558127221172 ], [ var='C' labels={host=dp-sea-us-013} value=0 ]} {Instance:host=dp-sea-us-014 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-014 Value:0xc03ceace90} B:{Var:B Labels:host=dp-sea-us-014 Value:0xc03ceaced0} C:{Var:C Labels:host=dp-sea-us-014 Value:0xc03ceacf20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43347939s EvaluationString:[ var='A' labels={host=dp-sea-us-014} value=-0.19754534999740592 ], [ var='B' labels={host=dp-sea-us-014} value=-0.19754534999740592 ], [ var='C' labels={host=dp-sea-us-014} value=0 ]} {Instance:host=dp-sea-us-015 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-015 Value:0xc03ceacfb0} B:{Var:B Labels:host=dp-sea-us-015 Value:0xc03ceacfd0} C:{Var:C Labels:host=dp-sea-us-015 Value:0xc03ceacf70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433488269s EvaluationString:[ var='A' labels={host=dp-sea-us-015} value=0.0640786177988812 ], [ var='B' labels={host=dp-sea-us-015} value=0.0640786177988812 ], [ var='C' labels={host=dp-sea-us-015} value=0 ]} {Instance:host=dp-sea-us-016 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-016 Value:0xc03cead090} B:{Var:B Labels:host=dp-sea-us-016 Value:0xc03cead040} C:{Var:C Labels:host=dp-sea-us-016 Value:0xc03cead070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433498749s EvaluationString:[ var='A' labels={host=dp-sea-us-016} value=-0.3487470176311073 ], [ var='B' labels={host=dp-sea-us-016} value=-0.3487470176311073 ], [ var='C' labels={host=dp-sea-us-016} value=0 ]} {Instance:host=dp-sea-us-017 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sea-us-017 Value:0xc03cead110} B:{Var:B Labels:host=dp-sea-us-017 Value:0xc03cead170} C:{Var:C Labels:host=dp-sea-us-017 Value:0xc03cead190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433506477s EvaluationString:[ var='A' labels={host=dp-sea-us-017} value=-0.12489125559710877 ], [ var='B' labels={host=dp-sea-us-017} value=-0.12489125559710877 ], [ var='C' labels={host=dp-sea-us-017} value=0 ]} {Instance:host=dp-sfo-us-016 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-016 Value:0xc03cead1f0} B:{Var:B Labels:host=dp-sfo-us-016 Value:0xc03cead240} C:{Var:C Labels:host=dp-sfo-us-016 Value:0xc03cead270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433514334s EvaluationString:[ var='A' labels={host=dp-sfo-us-016} value=-0.09252199063565078 ], [ var='B' labels={host=dp-sfo-us-016} value=-0.09252199063565078 ], [ var='C' labels={host=dp-sfo-us-016} value=0 ]} {Instance:host=dp-sfo-us-017 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-017 Value:0xc03cead2d0} B:{Var:B Labels:host=dp-sfo-us-017 Value:0xc03cead320} C:{Var:C Labels:host=dp-sfo-us-017 Value:0xc03cead350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433522889s EvaluationString:[ var='A' labels={host=dp-sfo-us-017} value=-0.020809173015708084 ], [ var='B' labels={host=dp-sfo-us-017} value=-0.020809173015708084 ], [ var='C' labels={host=dp-sfo-us-017} value=0 ]} {Instance:host=dp-sfo-us-018 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-018 Value:0xc03cead3d0} B:{Var:B Labels:host=dp-sfo-us-018 Value:0xc03cead410} C:{Var:C Labels:host=dp-sfo-us-018 Value:0xc03cead3a0}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433531006s EvaluationString:[ var='A' labels={host=dp-sfo-us-018} value=0.29725753636893637 ], [ var='B' labels={host=dp-sfo-us-018} value=0.29725753636893637 ], [ var='C' labels={host=dp-sfo-us-018} value=0 ]} {Instance:host=dp-sfo-us-019 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-019 Value:0xc03cead510} B:{Var:B Labels:host=dp-sfo-us-019 Value:0xc03cead480} C:{Var:C Labels:host=dp-sfo-us-019 Value:0xc03cead4b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433538073s EvaluationString:[ var='A' labels={host=dp-sfo-us-019} value=-0.17588666097340816 ], [ var='B' labels={host=dp-sfo-us-019} value=-0.17588666097340816 ], [ var='C' labels={host=dp-sfo-us-019} value=0 ]} {Instance:host=dp-sfo-us-020 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-020 Value:0xc03cead580} B:{Var:B Labels:host=dp-sfo-us-020 Value:0xc03cead5c0} C:{Var:C Labels:host=dp-sfo-us-020 Value:0xc03cead5e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433547485s EvaluationString:[ var='A' labels={host=dp-sfo-us-020} value=0.08764088065495793 ], [ var='B' labels={host=dp-sfo-us-020} value=0.08764088065495793 ], [ var='C' labels={host=dp-sfo-us-020} value=0 ]} {Instance:host=dp-sfo-us-021 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-021 Value:0xc03cead6e0} B:{Var:B Labels:host=dp-sfo-us-021 Value:0xc03cead690} C:{Var:C Labels:host=dp-sfo-us-021 Value:0xc03cead6b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4335554s EvaluationString:[ var='A' labels={host=dp-sfo-us-021} value=0.06476597030001585 ], [ var='B' labels={host=dp-sfo-us-021} value=0.06476597030001585 ], [ var='C' labels={host=dp-sfo-us-021} value=0 ]} {Instance:host=dp-sfo-us-022 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-022 Value:0xc03cead740} B:{Var:B Labels:host=dp-sfo-us-022 Value:0xc03cead770} C:{Var:C Labels:host=dp-sfo-us-022 Value:0xc03cead790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433564588s EvaluationString:[ var='A' labels={host=dp-sfo-us-022} value=-0.09460307413876734 ], [ var='B' labels={host=dp-sfo-us-022} value=-0.09460307413876734 ], [ var='C' labels={host=dp-sfo-us-022} value=0 ]} {Instance:host=dp-sfo-us-023 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-023 Value:0xc03cead7f0} B:{Var:B Labels:host=dp-sfo-us-023 Value:0xc03cead820} C:{Var:C Labels:host=dp-sfo-us-023 Value:0xc03cead850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433572206s EvaluationString:[ var='A' labels={host=dp-sfo-us-023} value=0.16733650904370734 ], [ var='B' labels={host=dp-sfo-us-023} value=0.16733650904370734 ], [ var='C' labels={host=dp-sfo-us-023} value=0 ]} {Instance:host=dp-sfo-us-024 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-024 Value:0xc03cead8e0} B:{Var:B Labels:host=dp-sfo-us-024 Value:0xc03cead920} C:{Var:C Labels:host=dp-sfo-us-024 Value:0xc03cead950}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433580878s EvaluationString:[ var='A' labels={host=dp-sfo-us-024} value=-0.07059562392067065 ], [ var='B' labels={host=dp-sfo-us-024} value=-0.07059562392067065 ], [ var='C' labels={host=dp-sfo-us-024} value=0 ]} {Instance:host=dp-sfo-us-025 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-025 Value:0xc03cead9b0} B:{Var:B Labels:host=dp-sfo-us-025 Value:0xc03ceada00} C:{Var:C 
Labels:host=dp-sfo-us-025 Value:0xc03ceada30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433588274s EvaluationString:[ var='A' labels={host=dp-sfo-us-025} value=-0.020734892607379152 ], [ var='B' labels={host=dp-sfo-us-025} value=-0.020734892607379152 ], [ var='C' labels={host=dp-sfo-us-025} value=0 ]} {Instance:host=dp-sfo-us-026 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-026 Value:0xc03ceadab0} B:{Var:B Labels:host=dp-sfo-us-026 Value:0xc03ceadad0} C:{Var:C Labels:host=dp-sfo-us-026 Value:0xc03ceadb00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433595954s EvaluationString:[ var='A' labels={host=dp-sfo-us-026} value=0.0047666087130185785 ], [ var='B' labels={host=dp-sfo-us-026} value=0.0047666087130185785 ], [ var='C' labels={host=dp-sfo-us-026} value=0 ]} {Instance:host=dp-sfo-us-027 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-027 Value:0xc03ceadb80} B:{Var:B Labels:host=dp-sfo-us-027 Value:0xc03ceadbc0} C:{Var:C Labels:host=dp-sfo-us-027 Value:0xc03ceadbe0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433603794s EvaluationString:[ var='A' labels={host=dp-sfo-us-027} value=0.6581581080434755 ], [ var='B' labels={host=dp-sfo-us-027} value=0.6581581080434755 ], [ var='C' labels={host=dp-sfo-us-027} value=0 ]} {Instance:host=dp-sfo-us-028 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sfo-us-028 Value:0xc03ceadc60} B:{Var:B Labels:host=dp-sfo-us-028 Value:0xc03ceadcb0} C:{Var:C Labels:host=dp-sfo-us-028 Value:0xc03ceadce0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433612676s EvaluationString:[ var='A' labels={host=dp-sfo-us-028} value=-0.10593093200533273 ], [ var='B' labels={host=dp-sfo-us-028} value=-0.10593093200533273 ], [ var='C' labels={host=dp-sfo-us-028} value=0 ]} {Instance:host=dp-sin-bd-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-bd-001 Value:0xc03ceadd60} B:{Var:B Labels:host=dp-sin-bd-001 Value:0xc03ceadd90} C:{Var:C Labels:host=dp-sin-bd-001 Value:0xc03ceaddc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433620997s EvaluationString:[ var='A' labels={host=dp-sin-bd-001} value=-0.7348507193655134 ], [ var='B' labels={host=dp-sin-bd-001} value=-0.7348507193655134 ], [ var='C' labels={host=dp-sin-bd-001} value=0 ]} {Instance:host=dp-sin-bd-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-bd-002 Value:0xc03ceade50} B:{Var:B Labels:host=dp-sin-bd-002 Value:0xc03ceade70} C:{Var:C Labels:host=dp-sin-bd-002 Value:0xc03ceade90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433629444s EvaluationString:[ var='A' labels={host=dp-sin-bd-002} value=0.0767237572182162 ], [ var='B' labels={host=dp-sin-bd-002} value=0.0767237572182162 ], [ var='C' labels={host=dp-sin-bd-002} value=0 ]} {Instance:host=dp-sin-bn-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-bn-001 Value:0xc03ceadf00} B:{Var:B Labels:host=dp-sin-bn-001 Value:0xc03ceadf40} C:{Var:C Labels:host=dp-sin-bn-001 Value:0xc03ceadf80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433638576s EvaluationString:[ var='A' labels={host=dp-sin-bn-001} value=0.04034756669638995 ], [ var='B' labels={host=dp-sin-bn-001} value=0.04034756669638995 ], [ var='C' labels={host=dp-sin-bn-001} value=0 ]} {Instance:host=dp-sin-bn-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-bn-002 Value:0xc02a982030} B:{Var:B 
Labels:host=dp-sin-bn-002 Value:0xc03ceadfe0} C:{Var:C Labels:host=dp-sin-bn-002 Value:0xc02a982010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433652307s EvaluationString:[ var='A' labels={host=dp-sin-bn-002} value=-0.0579486612263634 ], [ var='B' labels={host=dp-sin-bn-002} value=-0.0579486612263634 ], [ var='C' labels={host=dp-sin-bn-002} value=0 ]} {Instance:host=dp-sin-bt-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-bt-001 Value:0xc02a9820a0} B:{Var:B Labels:host=dp-sin-bt-001 Value:0xc02a9820c0} C:{Var:C Labels:host=dp-sin-bt-001 Value:0xc02a982070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433660574s EvaluationString:[ var='A' labels={host=dp-sin-bt-001} value=0.08722277624057385 ], [ var='B' labels={host=dp-sin-bt-001} value=0.08722277624057385 ], [ var='C' labels={host=dp-sin-bt-001} value=0 ]} {Instance:host=dp-sin-bt-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-bt-002 Value:0xc02a982150} B:{Var:B Labels:host=dp-sin-bt-002 Value:0xc02a982100} C:{Var:C Labels:host=dp-sin-bt-002 Value:0xc02a982130}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433668524s EvaluationString:[ var='A' labels={host=dp-sin-bt-002} value=0.019614595323616868 ], [ var='B' labels={host=dp-sin-bt-002} value=0.019614595323616868 ], [ var='C' labels={host=dp-sin-bt-002} value=0 ]} {Instance:host=dp-sin-kh-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-kh-001 Value:0xc02a982190} B:{Var:B Labels:host=dp-sin-kh-001 Value:0xc02a9821b0} C:{Var:C Labels:host=dp-sin-kh-001 Value:0xc02a9821d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433676445s EvaluationString:[ var='A' labels={host=dp-sin-kh-001} value=0.04145773932421548 ], [ var='B' labels={host=dp-sin-kh-001} value=0.04145773932421548 ], [ var='C' labels={host=dp-sin-kh-001} value=0 ]} {Instance:host=dp-sin-kh-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-kh-002 Value:0xc02a982220} B:{Var:B Labels:host=dp-sin-kh-002 Value:0xc02a982250} C:{Var:C Labels:host=dp-sin-kh-002 Value:0xc02a982270}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433684514s EvaluationString:[ var='A' labels={host=dp-sin-kh-002} value=-0.0690689214433198 ], [ var='B' labels={host=dp-sin-kh-002} value=-0.0690689214433198 ], [ var='C' labels={host=dp-sin-kh-002} value=0 ]} {Instance:host=dp-sin-kz-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-kz-001 Value:0xc02a9822b0} B:{Var:B Labels:host=dp-sin-kz-001 Value:0xc02a9822e0} C:{Var:C Labels:host=dp-sin-kz-001 Value:0xc02a982300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433692409s EvaluationString:[ var='A' labels={host=dp-sin-kz-001} value=0.06466166284193224 ], [ var='B' labels={host=dp-sin-kz-001} value=0.06466166284193224 ], [ var='C' labels={host=dp-sin-kz-001} value=0 ]} {Instance:host=dp-sin-kz-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-kz-002 Value:0xc02a982380} B:{Var:B Labels:host=dp-sin-kz-002 Value:0xc02a9823a0} C:{Var:C Labels:host=dp-sin-kz-002 Value:0xc02a982350}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433700789s EvaluationString:[ var='A' labels={host=dp-sin-kz-002} value=0.07222331465276474 ], [ var='B' labels={host=dp-sin-kz-002} value=0.07222331465276474 ], [ var='C' labels={host=dp-sin-kz-002} value=0 ]} {Instance:host=dp-sin-la-001 State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:host=dp-sin-la-001 Value:0xc02a9823e0} B:{Var:B Labels:host=dp-sin-la-001 Value:0xc02a982400} C:{Var:C Labels:host=dp-sin-la-001 Value:0xc02a982420}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433709287s EvaluationString:[ var='A' labels={host=dp-sin-la-001} value=0.11312898424796103 ], [ var='B' labels={host=dp-sin-la-001} value=0.11312898424796103 ], [ var='C' labels={host=dp-sin-la-001} value=0 ]} {Instance:host=dp-sin-la-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-la-002 Value:0xc02a982480} B:{Var:B Labels:host=dp-sin-la-002 Value:0xc02a9824b0} C:{Var:C Labels:host=dp-sin-la-002 Value:0xc02a982460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433717421s EvaluationString:[ var='A' labels={host=dp-sin-la-002} value=-0.20411147967303123 ], [ var='B' labels={host=dp-sin-la-002} value=-0.20411147967303123 ], [ var='C' labels={host=dp-sin-la-002} value=0 ]} {Instance:host=dp-sin-lk-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-lk-001 Value:0xc02a982530} B:{Var:B Labels:host=dp-sin-lk-001 Value:0xc02a9824f0} C:{Var:C Labels:host=dp-sin-lk-001 Value:0xc02a982510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433727342s EvaluationString:[ var='A' labels={host=dp-sin-lk-001} value=3.0599222692731587 ], [ var='B' labels={host=dp-sin-lk-001} value=3.0599222692731587 ], [ var='C' labels={host=dp-sin-lk-001} value=0 ]} {Instance:host=dp-sin-lk-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-lk-002 Value:0xc02a9825a0} B:{Var:B Labels:host=dp-sin-lk-002 Value:0xc02a9825c0} C:{Var:C Labels:host=dp-sin-lk-002 Value:0xc02a982580}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433735784s EvaluationString:[ var='A' labels={host=dp-sin-lk-002} value=-0.12950751422152174 ], [ var='B' labels={host=dp-sin-lk-002} value=-0.12950751422152174 ], [ var='C' labels={host=dp-sin-lk-002} value=0 ]} {Instance:host=dp-sin-mm-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-mm-001 Value:0xc02a982650} B:{Var:B Labels:host=dp-sin-mm-001 Value:0xc02a982610} C:{Var:C Labels:host=dp-sin-mm-001 Value:0xc02a982630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433744196s EvaluationString:[ var='A' labels={host=dp-sin-mm-001} value=-0.1535697899256038 ], [ var='B' labels={host=dp-sin-mm-001} value=-0.1535697899256038 ], [ var='C' labels={host=dp-sin-mm-001} value=0 ]} {Instance:host=dp-sin-mm-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-mm-002 Value:0xc02a982690} B:{Var:B Labels:host=dp-sin-mm-002 Value:0xc02a9826b0} C:{Var:C Labels:host=dp-sin-mm-002 Value:0xc02a9826d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433752421s EvaluationString:[ var='A' labels={host=dp-sin-mm-002} value=-0.04436634656531879 ], [ var='B' labels={host=dp-sin-mm-002} value=-0.04436634656531879 ], [ var='C' labels={host=dp-sin-mm-002} value=0 ]} {Instance:host=dp-sin-mn-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-mn-001 Value:0xc02a9827e0} B:{Var:B Labels:host=dp-sin-mn-001 Value:0xc02a982800} C:{Var:C Labels:host=dp-sin-mn-001 Value:0xc02a9827c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433759659s EvaluationString:[ var='A' labels={host=dp-sin-mn-001} value=-0.06735779879906072 ], [ var='B' labels={host=dp-sin-mn-001} value=-0.06735779879906072 ], [ var='C' labels={host=dp-sin-mn-001} value=0 ]} {Instance:host=dp-sin-mn-002 
State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-mn-002 Value:0xc02a982870} B:{Var:B Labels:host=dp-sin-mn-002 Value:0xc02a982890} C:{Var:C Labels:host=dp-sin-mn-002 Value:0xc02a982850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433767182s EvaluationString:[ var='A' labels={host=dp-sin-mn-002} value=-0.7323990513797678 ], [ var='B' labels={host=dp-sin-mn-002} value=-0.7323990513797678 ], [ var='C' labels={host=dp-sin-mn-002} value=0 ]} {Instance:host=dp-sin-mo-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-mo-001 Value:0xc02a9828e0} B:{Var:B Labels:host=dp-sin-mo-001 Value:0xc02a982900} C:{Var:C Labels:host=dp-sin-mo-001 Value:0xc02a982920}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433775187s EvaluationString:[ var='A' labels={host=dp-sin-mo-001} value=-0.09804222790286587 ], [ var='B' labels={host=dp-sin-mo-001} value=-0.09804222790286587 ], [ var='C' labels={host=dp-sin-mo-001} value=0 ]} {Instance:host=dp-sin-mo-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-mo-002 Value:0xc02a9829a0} B:{Var:B Labels:host=dp-sin-mo-002 Value:0xc02a982960} C:{Var:C Labels:host=dp-sin-mo-002 Value:0xc02a982980}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433783512s EvaluationString:[ var='A' labels={host=dp-sin-mo-002} value=-0.13497759737067636 ], [ var='B' labels={host=dp-sin-mo-002} value=-0.13497759737067636 ], [ var='C' labels={host=dp-sin-mo-002} value=0 ]} {Instance:host=dp-sin-my-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-my-005 Value:0xc02a982a00} B:{Var:B Labels:host=dp-sin-my-005 Value:0xc02a982a20} C:{Var:C Labels:host=dp-sin-my-005 Value:0xc02a982a40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433791527s EvaluationString:[ var='A' labels={host=dp-sin-my-005} value=0.018245953024396452 ], [ var='B' labels={host=dp-sin-my-005} value=0.018245953024396452 ], [ var='C' labels={host=dp-sin-my-005} value=0 ]} {Instance:host=dp-sin-my-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-my-006 Value:0xc02a982ad0} B:{Var:B Labels:host=dp-sin-my-006 Value:0xc02a982a80} C:{Var:C Labels:host=dp-sin-my-006 Value:0xc02a982aa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433800315s EvaluationString:[ var='A' labels={host=dp-sin-my-006} value=0.37485763500731295 ], [ var='B' labels={host=dp-sin-my-006} value=0.37485763500731295 ], [ var='C' labels={host=dp-sin-my-006} value=0 ]} {Instance:host=dp-sin-np-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-np-001 Value:0xc02a982b20} B:{Var:B Labels:host=dp-sin-np-001 Value:0xc02a982b40} C:{Var:C Labels:host=dp-sin-np-001 Value:0xc02a982b60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433808498s EvaluationString:[ var='A' labels={host=dp-sin-np-001} value=-0.05502780209802438 ], [ var='B' labels={host=dp-sin-np-001} value=-0.05502780209802438 ], [ var='C' labels={host=dp-sin-np-001} value=0 ]} {Instance:host=dp-sin-np-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-np-002 Value:0xc02a982ce0} B:{Var:B Labels:host=dp-sin-np-002 Value:0xc02a982bc0} C:{Var:C Labels:host=dp-sin-np-002 Value:0xc02a982c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433818238s EvaluationString:[ var='A' labels={host=dp-sin-np-002} value=0.018680562383252663 ], [ var='B' labels={host=dp-sin-np-002} value=0.018680562383252663 ], [ var='C' 
labels={host=dp-sin-np-002} value=0 ]} {Instance:host=dp-sin-pk-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-pk-001 Value:0xc02a982d20} B:{Var:B Labels:host=dp-sin-pk-001 Value:0xc02a982d50} C:{Var:C Labels:host=dp-sin-pk-001 Value:0xc02a982d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433825933s EvaluationString:[ var='A' labels={host=dp-sin-pk-001} value=-0.1933497754893576 ], [ var='B' labels={host=dp-sin-pk-001} value=-0.1933497754893576 ], [ var='C' labels={host=dp-sin-pk-001} value=0 ]} {Instance:host=dp-sin-pk-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-pk-002 Value:0xc02a982dd0} B:{Var:B Labels:host=dp-sin-pk-002 Value:0xc02a982e20} C:{Var:C Labels:host=dp-sin-pk-002 Value:0xc02a982e40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433846872s EvaluationString:[ var='A' labels={host=dp-sin-pk-002} value=-0.22793540988883385 ], [ var='B' labels={host=dp-sin-pk-002} value=-0.22793540988883385 ], [ var='C' labels={host=dp-sin-pk-002} value=0 ]} {Instance:host=dp-sin-sg-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-sg-001 Value:0xc02a982e90} B:{Var:B Labels:host=dp-sin-sg-001 Value:0xc02a982eb0} C:{Var:C Labels:host=dp-sin-sg-001 Value:0xc02a982ed0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433855007s EvaluationString:[ var='A' labels={host=dp-sin-sg-001} value=-0.6462175650998461 ], [ var='B' labels={host=dp-sin-sg-001} value=-0.6462175650998461 ], [ var='C' labels={host=dp-sin-sg-001} value=0 ]} {Instance:host=dp-sin-sg-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-sg-002 Value:0xc02a982f30} B:{Var:B Labels:host=dp-sin-sg-002 Value:0xc02a982f50} C:{Var:C Labels:host=dp-sin-sg-002 Value:0xc02a982f10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433863945s EvaluationString:[ var='A' labels={host=dp-sin-sg-002} value=-0.4107917127200089 ], [ var='B' labels={host=dp-sin-sg-002} value=-0.4107917127200089 ], [ var='C' labels={host=dp-sin-sg-002} value=0 ]} {Instance:host=dp-sin-sg-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-sg-003 Value:0xc02a982fb0} B:{Var:B Labels:host=dp-sin-sg-003 Value:0xc02a982fe0} C:{Var:C Labels:host=dp-sin-sg-003 Value:0xc02a982f90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433871485s EvaluationString:[ var='A' labels={host=dp-sin-sg-003} value=-0.013016286179852917 ], [ var='B' labels={host=dp-sin-sg-003} value=-0.013016286179852917 ], [ var='C' labels={host=dp-sin-sg-003} value=0 ]} {Instance:host=dp-sin-sg-004 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-sg-004 Value:0xc02a983030} B:{Var:B Labels:host=dp-sin-sg-004 Value:0xc02a983050} C:{Var:C Labels:host=dp-sin-sg-004 Value:0xc02a983070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433878792s EvaluationString:[ var='A' labels={host=dp-sin-sg-004} value=-0.36615901935036277 ], [ var='B' labels={host=dp-sin-sg-004} value=-0.36615901935036277 ], [ var='C' labels={host=dp-sin-sg-004} value=0 ]} {Instance:host=dp-sin-sg-005 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-sg-005 Value:0xc02a9830e0} B:{Var:B Labels:host=dp-sin-sg-005 Value:0xc02a983110} C:{Var:C Labels:host=dp-sin-sg-005 Value:0xc02a9830c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433887421s EvaluationString:[ var='A' labels={host=dp-sin-sg-005} value=-0.010019428879449574 ], [ var='B' 
labels={host=dp-sin-sg-005} value=-0.010019428879449574 ], [ var='C' labels={host=dp-sin-sg-005} value=0 ]} {Instance:host=dp-sin-sg-006 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-sg-006 Value:0xc02a983160} B:{Var:B Labels:host=dp-sin-sg-006 Value:0xc02a983180} C:{Var:C Labels:host=dp-sin-sg-006 Value:0xc02a9831a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433895882s EvaluationString:[ var='A' labels={host=dp-sin-sg-006} value=-0.0015322459688401782 ], [ var='B' labels={host=dp-sin-sg-006} value=-0.0015322459688401782 ], [ var='C' labels={host=dp-sin-sg-006} value=0 ]} {Instance:host=dp-sin-sg-007 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-sg-007 Value:0xc02a9831e0} B:{Var:B Labels:host=dp-sin-sg-007 Value:0xc02a983200} C:{Var:C Labels:host=dp-sin-sg-007 Value:0xc02a983220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433904221s EvaluationString:[ var='A' labels={host=dp-sin-sg-007} value=0.2641039838555912 ], [ var='B' labels={host=dp-sin-sg-007} value=0.2641039838555912 ], [ var='C' labels={host=dp-sin-sg-007} value=0 ]} {Instance:host=dp-sin-th-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-th-001 Value:0xc02a983280} B:{Var:B Labels:host=dp-sin-th-001 Value:0xc02a9832a0} C:{Var:C Labels:host=dp-sin-th-001 Value:0xc02a9832c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433912299s EvaluationString:[ var='A' labels={host=dp-sin-th-001} value=-0.047236310934672635 ], [ var='B' labels={host=dp-sin-th-001} value=-0.047236310934672635 ], [ var='C' labels={host=dp-sin-th-001} value=0 ]} {Instance:host=dp-sin-th-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-th-002 Value:0xc02a983320} B:{Var:B Labels:host=dp-sin-th-002 Value:0xc02a983350} C:{Var:C Labels:host=dp-sin-th-002 Value:0xc02a983300}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433919882s EvaluationString:[ var='A' labels={host=dp-sin-th-002} value=-0.12447447324969321 ], [ var='B' labels={host=dp-sin-th-002} value=-0.12447447324969321 ], [ var='C' labels={host=dp-sin-th-002} value=0 ]} {Instance:host=dp-sin-th-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-th-003 Value:0xc02a9833a0} B:{Var:B Labels:host=dp-sin-th-003 Value:0xc02a9833c0} C:{Var:C Labels:host=dp-sin-th-003 Value:0xc02a9833e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433929285s EvaluationString:[ var='A' labels={host=dp-sin-th-003} value=0.13860933900054256 ], [ var='B' labels={host=dp-sin-th-003} value=0.13860933900054256 ], [ var='C' labels={host=dp-sin-th-003} value=0 ]} {Instance:host=dp-sin-vn-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-vn-001 Value:0xc02a983420} B:{Var:B Labels:host=dp-sin-vn-001 Value:0xc02a983440} C:{Var:C Labels:host=dp-sin-vn-001 Value:0xc02a983460}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433937017s EvaluationString:[ var='A' labels={host=dp-sin-vn-001} value=0.21381439339193223 ], [ var='B' labels={host=dp-sin-vn-001} value=0.21381439339193223 ], [ var='C' labels={host=dp-sin-vn-001} value=0 ]} {Instance:host=dp-sin-vn-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-sin-vn-002 Value:0xc02a9834b0} B:{Var:B Labels:host=dp-sin-vn-002 Value:0xc02a9834d0} C:{Var:C Labels:host=dp-sin-vn-002 Value:0xc02a983500}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433944957s EvaluationString:[ var='A' 
labels={host=dp-sin-vn-002} value=-0.3077865423394428 ], [ var='B' labels={host=dp-sin-vn-002} value=-0.3077865423394428 ], [ var='C' labels={host=dp-sin-vn-002} value=0 ]} {Instance:host=dp-syd-au-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-syd-au-001 Value:0xc02a983560} B:{Var:B Labels:host=dp-syd-au-001 Value:0xc02a983580} C:{Var:C Labels:host=dp-syd-au-001 Value:0xc02a983540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433952831s EvaluationString:[ var='A' labels={host=dp-syd-au-001} value=0.053881812917195605 ], [ var='B' labels={host=dp-syd-au-001} value=0.053881812917195605 ], [ var='C' labels={host=dp-syd-au-001} value=0 ]} {Instance:host=dp-syd-au-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-syd-au-002 Value:0xc02a9835e0} B:{Var:B Labels:host=dp-syd-au-002 Value:0xc02a983600} C:{Var:C Labels:host=dp-syd-au-002 Value:0xc02a983620}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433961652s EvaluationString:[ var='A' labels={host=dp-syd-au-002} value=-0.0023497331948855544 ], [ var='B' labels={host=dp-syd-au-002} value=-0.0023497331948855544 ], [ var='C' labels={host=dp-syd-au-002} value=0 ]} {Instance:host=dp-syd-au-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-syd-au-003 Value:0xc02a983660} B:{Var:B Labels:host=dp-syd-au-003 Value:0xc02a983680} C:{Var:C Labels:host=dp-syd-au-003 Value:0xc02a9836a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433969723s EvaluationString:[ var='A' labels={host=dp-syd-au-003} value=-0.07749778667510604 ], [ var='B' labels={host=dp-syd-au-003} value=-0.07749778667510604 ], [ var='C' labels={host=dp-syd-au-003} value=0 ]} {Instance:host=dp-tlv-il-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-tlv-il-001 Value:0xc02a9836e0} B:{Var:B Labels:host=dp-tlv-il-001 Value:0xc02a983700} C:{Var:C Labels:host=dp-tlv-il-001 Value:0xc02a983720}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433977174s EvaluationString:[ var='A' labels={host=dp-tlv-il-001} value=-0.05683813683707702 ], [ var='B' labels={host=dp-tlv-il-001} value=-0.05683813683707702 ], [ var='C' labels={host=dp-tlv-il-001} value=0 ]} {Instance:host=dp-tlv-il-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-tlv-il-002 Value:0xc02a9837b0} B:{Var:B Labels:host=dp-tlv-il-002 Value:0xc02a983770} C:{Var:C Labels:host=dp-tlv-il-002 Value:0xc02a983790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433984767s EvaluationString:[ var='A' labels={host=dp-tlv-il-002} value=-0.2528924853389114 ], [ var='B' labels={host=dp-tlv-il-002} value=-0.2528924853389114 ], [ var='C' labels={host=dp-tlv-il-002} value=0 ]} {Instance:host=dp-vie-at-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-vie-at-001 Value:0xc02a9837f0} B:{Var:B Labels:host=dp-vie-at-001 Value:0xc02a983830} C:{Var:C Labels:host=dp-vie-at-001 Value:0xc02a983850}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433992567s EvaluationString:[ var='A' labels={host=dp-vie-at-001} value=-0.03375624309432131 ], [ var='B' labels={host=dp-vie-at-001} value=-0.03375624309432131 ], [ var='C' labels={host=dp-vie-at-001} value=0 ]} {Instance:host=dp-vie-at-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-vie-at-002 Value:0xc02a983890} B:{Var:B Labels:host=dp-vie-at-002 Value:0xc02a9838b0} C:{Var:C Labels:host=dp-vie-at-002 Value:0xc02a9838d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC 
EvaluationDuration:3.43400134s EvaluationString:[ var='A' labels={host=dp-vie-at-002} value=0.0015975519183898867 ], [ var='B' labels={host=dp-vie-at-002} value=0.0015975519183898867 ], [ var='C' labels={host=dp-vie-at-002} value=0 ]} {Instance:host=dp-vie-at-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-vie-at-003 Value:0xc02a983910} B:{Var:B Labels:host=dp-vie-at-003 Value:0xc02a983930} C:{Var:C Labels:host=dp-vie-at-003 Value:0xc02a983960}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.434009334s EvaluationString:[ var='A' labels={host=dp-vie-at-003} value=0.030271727678155003 ], [ var='B' labels={host=dp-vie-at-003} value=0.030271727678155003 ], [ var='C' labels={host=dp-vie-at-003} value=0 ]} {Instance:host=dp-waw-pl-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-waw-pl-001 Value:0xc02a9839a0} B:{Var:B Labels:host=dp-waw-pl-001 Value:0xc02a9839d0} C:{Var:C Labels:host=dp-waw-pl-001 Value:0xc02a9839f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.434018857s EvaluationString:[ var='A' labels={host=dp-waw-pl-001} value=0.0921877174498557 ], [ var='B' labels={host=dp-waw-pl-001} value=0.0921877174498557 ], [ var='C' labels={host=dp-waw-pl-001} value=0 ]} {Instance:host=dp-waw-pl-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-waw-pl-002 Value:0xc02a983a30} B:{Var:B Labels:host=dp-waw-pl-002 Value:0xc02a983b10} C:{Var:C Labels:host=dp-waw-pl-002 Value:0xc02a983b40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43402639s EvaluationString:[ var='A' labels={host=dp-waw-pl-002} value=-0.08452178353422823 ], [ var='B' labels={host=dp-waw-pl-002} value=-0.08452178353422823 ], [ var='C' labels={host=dp-waw-pl-002} value=0 ]} {Instance:host=dp-waw-pl-003 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-waw-pl-003 Value:0xc02a983bd0} B:{Var:B Labels:host=dp-waw-pl-003 Value:0xc02a983b90} C:{Var:C Labels:host=dp-waw-pl-003 Value:0xc02a983bb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43403568s EvaluationString:[ var='A' labels={host=dp-waw-pl-003} value=0.01317476192771291 ], [ var='B' labels={host=dp-waw-pl-003} value=0.01317476192771291 ], [ var='C' labels={host=dp-waw-pl-003} value=0 ]} {Instance:host=dp-waw-ua-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-waw-ua-001 Value:0xc02a983c60} B:{Var:B Labels:host=dp-waw-ua-001 Value:0xc02a983c10} C:{Var:C Labels:host=dp-waw-ua-001 Value:0xc02a983c30}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.434043434s EvaluationString:[ var='A' labels={host=dp-waw-ua-001} value=-0.19811977168846084 ], [ var='B' labels={host=dp-waw-ua-001} value=-0.19811977168846084 ], [ var='C' labels={host=dp-waw-ua-001} value=0 ]} {Instance:host=dp-waw-ua-002 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-waw-ua-002 Value:0xc02a983cf0} B:{Var:B Labels:host=dp-waw-ua-002 Value:0xc02a983ca0} C:{Var:C Labels:host=dp-waw-ua-002 Value:0xc02a983cc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.434050817s EvaluationString:[ var='A' labels={host=dp-waw-ua-002} value=-0.0709374913488503 ], [ var='B' labels={host=dp-waw-ua-002} value=-0.0709374913488503 ], [ var='C' labels={host=dp-waw-ua-002} value=0 ]} {Instance:host=dp-yyz-ca-001 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:host=dp-yyz-ca-001 Value:0xc02a983d30} B:{Var:B Labels:host=dp-yyz-ca-001 Value:0xc02a983d50} C:{Var:C Labels:host=dp-yyz-ca-001 
Value:0xc02a983d80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.434059485s EvaluationString:[ var='A' labels={host=dp-yyz-ca-001} value=0.19110723850784694 ], [ var='B' labels={host=dp-yyz-ca-001} value=0.19
+ level=debug ts=2024-05-29T13:44:13.486792924Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.48647183Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.486438695Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.486223153Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.486168324Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.486104854Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=uniclient, pod=uniclient-747fbfc77-5sdcw" t=2024-05-29T13:44:13.485910654Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.485866054Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=798928 slug=basepowercompany t=2024-05-29T13:44:13.485760152Z level=debug msg="Saving alert states done" count=4 max_state_save_concurrency=1 duration=66.671407ms
+ level=debug ts=2024-05-29T13:44:13.485805639Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:13.485815716Z level=debug msg="State manager processing evaluation results" resultCount=2
+ level=debug component=discovery ts=2024-05-29T13:44:13.485386886Z caller=hgapi.go:144 msg="received a list of active users from the hg api" count=634
+ level=debug ts=2024-05-29T13:44:13.485235564Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=630233 slug=bettercloudprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.485235101Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=630233 slug=bettercloudprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.485221681Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.485128179Z caller=remote_instance_store.go:51 user=528903 slug=nrodcaci msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=528903 slug=nrodcaci instance= t=2024-05-29T13:44:13.48507429Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=528903 slug=nrodcaci instance= t=2024-05-29T13:44:13.485065466Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=528903 slug=nrodcaci version=10 fingerprint=5f42eb100c9601ce attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.484940258Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc0029b70e8} C:{Var:C Labels: Value:0xc0029b70f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.484534089s EvaluationString:[ var='B' labels={} value=NaN ], [ var='C' labels={} value=0 ]}]" duration=29.143029ms
+ level=debug ts=2024-05-29T13:44:13.484795818Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=642786 slug=sophoscomnsg t=2024-05-29T13:44:13.484780891Z level=debug msg="Saving alert states done" count=13 max_state_save_concurrency=1 duration=188.473076ms
+ logger=ngalert.state.manager user=277970 slug=teckresourcestest t=2024-05-29T13:44:13.484639707Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=277970 slug=teckresourcestest version=4 fingerprint=0de2d6af9b1346eb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.484530465Z level=error msg="Failed to evaluate rule" error="failed to build query 'C': data source not found" duration=5.187965ms
+ level=error ts=2024-05-29T13:44:13.48443059Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'C': data source not found"
+ logger=ngalert.state.manager.persist user=273163 slug=rushii t=2024-05-29T13:44:13.484362295Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.935109ms
+ level=debug ts=2024-05-29T13:44:13.484427728Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.484369504Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.484320326Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.484324008Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=134486 slug=podigee t=2024-05-29T13:44:13.484213477Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.464608ms
+ level=debug ts=2024-05-29T13:44:13.483846688Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.483914179Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=150145 slug=pleasant instance= t=2024-05-29T13:44:13.483852584Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=554711 slug=bekci t=2024-05-29T13:44:13.483572026Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=554711 slug=bekci instance= t=2024-05-29T13:44:13.4835469Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=554711 slug=bekci t=2024-05-29T13:44:13.483516784Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.483437702Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=459086 slug=metricgamingprd t=2024-05-29T13:44:13.483290007Z level=debug msg="Saving alert states" count=5 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=459086 slug=metricgamingprd instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.483277169Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=459086 slug=metricgamingprd instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.483265634Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=459086 slug=metricgamingprd instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.48325178Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=459086 slug=metricgamingprd t=2024-05-29T13:44:13.48315469Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.48298242Z caller=remote_instance_store.go:51 user=795224 slug=gannettdigital msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.482978543Z caller=remote_instance_store.go:51 user=151082 slug=butler msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=151082 slug=butler instance= t=2024-05-29T13:44:13.482917235Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=sAAhZ0a7z, ref_id=A" t=2024-05-29T13:44:13.482701129Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.482714092Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.482751765Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.4826631Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=532653 slug=chathamdirectprd instance= t=2024-05-29T13:44:13.482474655Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.4825552Z caller=remote_instance_store.go:51 user=532653 slug=chathamdirectprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=532653 slug=chathamdirectprd instance= t=2024-05-29T13:44:13.482458254Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.482530578Z level=debug msg="Saving alert states" count=6 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.482446432Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=pgw-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.14.77:9090, job=prometheus-k8s, k8s_cluster=pgw-01, kubernetes_cluster=pgw-01, namespace=monitoring, pod=prom-agent-k8s-0, prometheus=monitoring/k8s, prometheus_shard=pgw-01-0, service=prometheus-k8s" t=2024-05-29T13:44:13.482385318Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:13.482347797Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="service=prometheus-k8s"
+ logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:13.482378941Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A,C" t=2024-05-29T13:44:13.482365084Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=183214 slug=vectorizedio instance="datasource_uid=VtFd5GIVz, ref_id=A,C" t=2024-05-29T13:44:13.482338788Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=488634 slug=smartcall version=14 fingerprint=aa5f2fb045922ffe attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.482202381Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.481840059s EvaluationString:}]" duration=45.979984ms
+ logger=ngalert.scheduler user=183214 slug=vectorizedio version=22 fingerprint=35a53b6c66bef474 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.482254994Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=VtFd5GIVz, ref_id=A,C State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.482031514s EvaluationString:}]" duration=68.275647ms
+ logger=ngalert.state.manager user=471861 slug=planetstaging instance="cluster=erb-01, cluster_type=gke, container=prometheus, endpoint=http, gcp_project=planet-gketest-prod, instance=172.24.123.167:9090, job=prometheus-k8s, k8s_cluster=erb-01, kubernetes_cluster=erb-01, namespace=monitoring, pod=prom-agent-k8s-shard-1-0, prometheus=monitoring/k8s, prometheus_shard=erb-01-0, service=prometheus-k8s" t=2024-05-29T13:44:13.482257088Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.482124612Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=326874 slug=fastpath t=2024-05-29T13:44:13.482075199Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=326874 slug=fastpath instance="alert_sensitivity=high, instance=https://workflow-api.gofastpath.com/actuator/health, job=http-check-workflow-api-prod" t=2024-05-29T13:44:13.482044841Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=114286 slug=enverus t=2024-05-29T13:44:13.482073004Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.481914525Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:13.481934439Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules.
Those labels from the result will be ignored" labels="service=prometheus-k8s" + logger=ngalert.state.manager.persist user=314015 slug=fberggren t=2024-05-29T13:44:13.481748928Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=314015 slug=fberggren version=22 fingerprint=acdb31adcd9c4f3a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.481616012Z level=debug msg="Alert rule evaluated" results="[{Instance:id=smokegas, instance=192.168.2.105:80, job=pannan, name=smokegas, unit=°C State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:id=smokegas, instance=192.168.2.105:80, job=pannan, name=smokegas, unit=°C Value:0xc0816d2660} B:{Var:B Labels:id=smokegas, instance=192.168.2.105:80, job=pannan, name=smokegas, unit=°C Value:0xc0816d26c8} C:{Var:C Labels:id=smokegas, instance=192.168.2.105:80, job=pannan, name=smokegas, unit=°C Value:0xc0816d2708}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.481206175s EvaluationString:[ var='A' labels={id=smokegas, instance=192.168.2.105:80, job=pannan, name=smokegas, unit=°C} value=55.281818181818174 ], [ var='B' labels={id=smokegas, instance=192.168.2.105:80, job=pannan, name=smokegas, unit=°C} value=55.281818181818174 ], [ var='C' labels={id=smokegas, instance=192.168.2.105:80, job=pannan, name=smokegas, unit=°C} value=1 ]}]" duration=13.653094ms + level=debug ts=2024-05-29T13:44:13.481373974Z caller=remote_instance_store.go:51 user=201790 slug=veedmo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.481261614Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.48084466Z caller=remote_alert_sender.go:94 user=507549 slug=coindcx host=coindcx-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.184.17.223:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b924a99c-a898-4b8b-a82d-7a6805bd6974 alerts=8 + logger=ngalert.state.manager user=807840 slug=aidungeon t=2024-05-29T13:44:13.480810157Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=507549 slug=coindcx t=2024-05-29T13:44:13.480662559Z level=debug msg="Saving alert states done" count=8 max_state_save_concurrency=1 duration=96.839416ms + level=debug ts=2024-05-29T13:44:13.480583368Z caller=remote_instance_store.go:51 user=792486 slug=sendman msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=111653 slug=theassociationmxp t=2024-05-29T13:44:13.480542415Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=111653 slug=theassociationmxp t=2024-05-29T13:44:13.480458435Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.480467521Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.480259803Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.480211778Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.480150759Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.480093894Z caller=remote_instance_store.go:51 user=932433 
slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.480081784Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=794290 slug=mcogenai t=2024-05-29T13:44:13.479916505Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.479904389Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.479228219Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=708531 slug=dooshimagbamwuan t=2024-05-29T13:44:13.479226426Z level=debug msg="Saving alert states done" count=7 max_state_save_concurrency=1 duration=68.185488ms + level=debug ts=2024-05-29T13:44:13.478689133Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.478684371Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.47860339Z caller=remote_instance_store.go:51 user=396586 slug=opengov msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:13.478564881Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=396586 slug=opengov instance= t=2024-05-29T13:44:13.478551532Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:13.478472161Z caller=remote_instance_store.go:51 user=373502 slug=stakeandrelax msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=373502 slug=stakeandrelax t=2024-05-29T13:44:13.478419682Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.478362092Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=373502 slug=stakeandrelax t=2024-05-29T13:44:13.478333509Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.478274007Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.477715546Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.027949ms + logger=ngalert.scheduler user=145127 slug=detooperp version=5 fingerprint=42386747bc9c916b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.477481223Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.477196912s EvaluationString:}]" duration=69.865059ms + logger=ngalert.state.manager.persist user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:13.47743028Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=info ts=2024-05-29T13:44:13.477374569Z caller=remote_image_capturer.go:61 user=937416 slug=cambridgeuniversitypress rule_org_id=1 rule_uid=NzyubG87k dashboard=HXfGZAsnk panel=2 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.476999712Z caller=remote_instance_store.go:51 user=506300 slug=jostens 
msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.477007992Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.476809165Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.47664016Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress instance= t=2024-05-29T13:44:13.476627595Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=937416 slug=cambridgeuniversitypress t=2024-05-29T13:44:13.476519413Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:13.476477631Z caller=remote_alert_sender.go:94 user=31632 slug=fastjp host=fastjp-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.11.235:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=LXBsE8i4z alerts=1 + logger=ngalert.scheduler user=937416 slug=cambridgeuniversitypress version=2 fingerprint=3014edf23ebbf55d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.47639172Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B0:{Var:B Labels:ClusterName=TF-Dev-Java-Services, ServiceName=TF-Dev-Publisher Value:0xc02b42c450}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.476015194s EvaluationString:[ var='B0' metric='TF-Dev-Java-Services TF-Dev-Publisher' labels={ClusterName=TF-Dev-Java-Services, ServiceName=TF-Dev-Publisher} value=94.82421875 ]}]" duration=51.365474ms + level=debug ts=2024-05-29T13:44:13.476395848Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.476398108Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.476284995Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.4761129Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.476130029Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=770817 slug=exproment version=11 fingerprint=d360fe278824171a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.476020567Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc0089d28e8} B:{Var:B Labels: Value:0xc0089d28f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.475392443s EvaluationString:[ var='A' labels={} value=3 ], [ var='B' labels={} value=0 ]}]" duration=102.479988ms + level=debug ts=2024-05-29T13:44:13.47612132Z caller=remote_instance_store.go:51 user=638425 slug=docktech msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.476091758Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.476021942Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.476009147Z caller=remote_instance_store.go:51 user=727299 
slug=dellisgtechops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=638425 slug=docktech t=2024-05-29T13:44:13.476017119Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=prd" + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=wsParabiliaWebHookPool, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.476007009Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=wsFechasClientesPool, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475973548Z level=debug msg="Keeping state" state=Normal + level=debug component=discovery ts=2024-05-29T13:44:13.475861856Z caller=hgapi.go:144 msg="received a list of active users from the hg api" count=1269 + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=wsDockOnePool, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475922108Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=638425 slug=docktech t=2024-05-29T13:44:13.475754516Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=prd" + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=RDWebAccess, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475742816Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=RDWebAccess, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475725795Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=638425 slug=docktech t=2024-05-29T13:44:13.475712585Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="env=prd" + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=PreAutorizador, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475703065Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=638425 slug=docktech t=2024-05-29T13:44:13.475677325Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. Those labels from the result will be ignored" labels="env=prd" + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=DefaultAppPool, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475656165Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=Classic .NET AppPool, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475628614Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.475617224Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.4756067Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.475598347Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391359 slug=linklogistics instance= t=2024-05-29T13:44:13.475595645Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=638425 slug=docktech t=2024-05-29T13:44:13.475604994Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="env=prd" + level=debug ts=2024-05-29T13:44:13.475508197Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=.NET v4.5, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475540633Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=391359 slug=linklogistics version=7 fingerprint=c3559fe504fb2a83 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.475456843Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels: Value:0xc00d4782e8} B:{Var:B Labels: Value:0xc00d4782d0} C:{Var:C Labels: Value:0xc00d4782d8} D:{Var:D Labels: Value:0xc00d4782e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.474218868s EvaluationString:[ var='A' labels={} value=44 ], [ var='B' labels={} value=44 ], [ var='C' labels={} value=1 ], [ var='D' labels={} value=1 ]}]" duration=106.603863ms + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=.NET v4.5 Classic, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475515783Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=.NET v4.5 Classic, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475506353Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=638425 slug=docktech t=2024-05-29T13:44:13.475486833Z level=warn msg="Evaluation result contains either reserved labels or labels declared in the rules. 
Those labels from the result will be ignored" labels="env=prd" + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=.NET v2.0, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475468132Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=.NET v2.0, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475455902Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=638425 slug=docktech instance="__name__=windows_iis_current_application_pool_state, account=mexico-prd, agent_hostname=mx-dockone-prd, app=.NET v2.0 Classic, business_unit=mexico-aws, cloud_provider=aws, env=prd, instance=mx-dockone-prd, job=integrations/windows_exporter, service=DockOne, squad=infrastructure, state=Running" t=2024-05-29T13:44:13.475417582Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.475308251Z caller=remote_instance_store.go:51 user=548157 slug=kushkiprod msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.475283482Z caller=remote_instance_store.go:51 user=935198 slug=provable msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.475199336Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698103 slug=vericast t=2024-05-29T13:44:13.474971017Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.474914812Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698103 slug=vericast instance="datasource_uid=ce64a33c-3937-4c00-8e3f-b8c7277af2de, ref_id=A" t=2024-05-29T13:44:13.474938695Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=698103 slug=vericast t=2024-05-29T13:44:13.474885762Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.474836396Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:13.474865299Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.scheduler user=698103 slug=vericast version=24 fingerprint=9763a43639eca954 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.47482441Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ce64a33c-3937-4c00-8e3f-b8c7277af2de, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.474406187s EvaluationString:}]" duration=46.015712ms + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.474855498Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.474833543Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.474758727Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=163513 slug=dialpad t=2024-05-29T13:44:13.474746427Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.474716946Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=12148b7d2653361e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.474418645Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.474142867s EvaluationString:}]" duration=150.099566ms + logger=ngalert.state.manager.persist user=705083 slug=mediakindsaas t=2024-05-29T13:44:13.474287233Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=77.19251ms + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.474309111Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.474068736Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.473943242Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.473879421Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.473768849Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.473088235Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=327842 slug=exabeam instance= t=2024-05-29T13:44:13.473075143Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=327842 slug=exabeam instance= t=2024-05-29T13:44:13.473063833Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=327842 slug=exabeam t=2024-05-29T13:44:13.473029223Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.472889189Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.472900811Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=22.348559ms + level=debug ts=2024-05-29T13:44:13.472917025Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.472477474Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix instance="__name__=prometheus_ready, instance=localhost:9090, job=prometheus" t=2024-05-29T13:44:13.472427214Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=206107 slug=hydrolix t=2024-05-29T13:44:13.472359548Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.472250608Z caller=remote_instance_store.go:51 user=756004 slug=jdsportsprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=148654 slug=tinybeans t=2024-05-29T13:44:13.472145917Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.795116ms + level=debug ts=2024-05-29T13:44:13.472070572Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.472085085Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.472009173Z caller=remote_alert_sender.go:94 user=437877 slug=justwilliam host=justwilliam-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.4.44:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=a0218dcc-74e8-4618-9411-76bfa939d268 alerts=1 + level=debug ts=2024-05-29T13:44:13.472001122Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.47195549Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.47189409Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=437877 slug=justwilliam t=2024-05-29T13:44:13.471888276Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.202856ms + level=debug ts=2024-05-29T13:44:13.471763581Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.471799016Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.471566147Z caller=remote_instance_store.go:51 user=266592 slug=srsge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.471448495Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=tpt.sessionguardian.com" t=2024-05-29T13:44:13.471494425Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=sr.sessionguardian.com" t=2024-05-29T13:44:13.47146883Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.471439814Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=hirecounsel.sessionguardian.com" t=2024-05-29T13:44:13.471440207Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=266592 slug=srsge instance="ECS_PROMETHEUS_JOB_NAME=cov.sessionguardian.com" t=2024-05-29T13:44:13.471421437Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=266592 slug=srsge t=2024-05-29T13:44:13.4713651Z level=debug msg="State manager processing evaluation results" resultCount=4 + level=debug ts=2024-05-29T13:44:13.471353822Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager.persist user=287424 slug=ercande t=2024-05-29T13:44:13.471349437Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.543162ms + logger=ngalert.state.manager user=624354 slug=truliooworkflow instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.471151422Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.471088511Z caller=remote_instance_store.go:51 user=642786 slug=sophoscomnsg msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.471038714Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.471011103Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=196013 slug=inmediasoftware instance="application=authentication-microservice" t=2024-05-29T13:44:13.47090704Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=316418 slug=workmotion instance="ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww" t=2024-05-29T13:44:13.470893619Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=682586 slug=nielsonquea t=2024-05-29T13:44:13.470858153Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.301749ms + level=debug ts=2024-05-29T13:44:13.470683659Z caller=remote_instance_store.go:51 user=798928 slug=basepowercompany msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=277807 slug=info96f8 instance="__name__=magento_rabbitmq_health_check, environment=anwb-webwinkel, instance=prod.anwb-webwinkel.emico.nl:443, job=magento, monitor=magento" t=2024-05-29T13:44:13.470418577Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=316418 slug=workmotion version=4 fingerprint=df73cde584d78450 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.470303916Z level=debug msg="Alert rule evaluated" results="[{Instance:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw Value:0xc07eb9e228} C:{Var:C Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw Value:0xc07eb9e200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.46986975s EvaluationString:[ var='B' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw} value=15.4 ], [ var='C' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=10eKxF4jRqenox4anBTrCw} value=0 ]} {Instance:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww Value:0xc07eb9e2a0} C:{Var:C Labels:ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww Value:0xc07eb9e2d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.469889344s EvaluationString:[ var='B' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, NodeId=hxHfRl-xQUauVzymQSapww} value=9 ], [ var='C' labels={ClientId=455456564602, DomainName=prod-workmotion-auditlog, 
NodeId=hxHfRl-xQUauVzymQSapww} value=0 ]}]" duration=41.739149ms + level=debug ts=2024-05-29T13:44:13.469895052Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.469879742Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.469636006Z caller=remote_instance_store.go:51 user=76255 slug=benzinga msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=76255 slug=benzinga instance= t=2024-05-29T13:44:13.469579665Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=76255 slug=benzinga version=1 fingerprint=e9725305b5a9f2c4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.469421048Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.469175177s EvaluationString:}]" duration=132.532429ms + level=debug ts=2024-05-29T13:44:13.469388626Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.46924583Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.469209505Z caller=remote_instance_store.go:51 user=556147 slug=bettercloudholding msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.469182325Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.469158889Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=698963 slug=lemonade t=2024-05-29T13:44:13.46915016Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-858ccb2bzh" t=2024-05-29T13:44:13.469133359Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.469099052Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=698963 slug=lemonade instance="app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-858ccb2bzh" t=2024-05-29T13:44:13.469119319Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=698963 slug=lemonade version=2 fingerprint=2bc6e4cc84c7440c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.468937096Z level=debug msg="Alert rule evaluated" results="[{Instance:app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-858ccb2bzh State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-858ccb2bzh Value:0xc067dbc588} THRESHOLD:{Var:THRESHOLD Labels:app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-858ccb2bzh Value:0xc067dbc5b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.468575927s EvaluationString:[ var='QUERY' labels={app=lemodel-platform-interactive-command-listener-worker, 
pod=lemodel-platform-interactive-command-listener-worker-858ccb2bzh} value=0 ], [ var='THRESHOLD' labels={app=lemodel-platform-interactive-command-listener-worker, pod=lemodel-platform-interactive-command-listener-worker-858ccb2bzh} value=0 ]}]" duration=49.442674ms + logger=ngalert.state.manager.persist user=504517 slug=mohdkhairi t=2024-05-29T13:44:13.468712679Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.454823ms + level=info ts=2024-05-29T13:44:13.468700431Z caller=remote_alert_sender.go:94 user=84824 slug=factmata host=factmata-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.160.26.193:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=e543c869-af46-4241-a732-20080d7c8c5f alerts=1 + logger=ngalert.state.manager.persist user=84824 slug=factmata t=2024-05-29T13:44:13.468623877Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.44576ms + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.468344178Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.468432395Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.468335889Z level=debug msg="Execution keep last state is Normal" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.468301851Z caller=remote_image_capturer.go:33 user=538037 slug=drivewealth rule_org_id=1 rule_uid=fdlpplzicx2psd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.468291228Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:45:40Z next_ends_at=2024-05-29T13:46:10Z + logger=ngalert.state.manager user=538037 slug=drivewealth instance="datasource_uid=adennlvlmzi0wf, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.468278998Z level=debug msg="Execution keep last state is Alerting" handler=resultAlerting + logger=ngalert.state.manager user=538037 slug=drivewealth t=2024-05-29T13:44:13.468248544Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=538037 slug=drivewealth version=28 fingerprint=6b753910401e1c16 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.468178468Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=adennlvlmzi0wf, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.467604015s EvaluationString:}]" duration=53.048105ms + logger=ngalert.state.manager.persist user=489921 slug=statuscake t=2024-05-29T13:44:13.46820176Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.468092918Z caller=remote_instance_store.go:51 user=163215 slug=tripadvisor msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=334665 slug=mjacobson t=2024-05-29T13:44:13.468120668Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=787184 slug=brownlabatyale t=2024-05-29T13:44:13.4680847Z 
level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=787184 slug=brownlabatyale instance="datasource_uid=ddgd1f3jrgdmob, ref_id=TimeSeries" t=2024-05-29T13:44:13.46802383Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=334665 slug=mjacobson instance= t=2024-05-29T13:44:13.468057428Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.468057111Z caller=remote_image_capturer.go:33 user=787184 slug=brownlabatyale rule_org_id=1 rule_uid=ddmasb1svcnb4d msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.scheduler user=787184 slug=brownlabatyale version=9 fingerprint=c230d055650e71ec attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.467912686Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=ddgd1f3jrgdmob, ref_id=TimeSeries State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.467535334s EvaluationString:}]" duration=37.433222ms + level=debug ts=2024-05-29T13:44:13.467888512Z caller=remote_instance_store.go:51 user=391359 slug=linklogistics msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60199 slug=wallapop instance= t=2024-05-29T13:44:13.467882355Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=60199 slug=wallapop t=2024-05-29T13:44:13.467840328Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=391359 slug=linklogistics instance= t=2024-05-29T13:44:13.46782951Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=298436 slug=mgnr version=69 fingerprint=f8775fcd918a58e6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.467447623Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.467125169s EvaluationString:}]" duration=13.943765ms + logger=ngalert.state.manager.persist user=386776 slug=rcsworks t=2024-05-29T13:44:13.467501601Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=386776 slug=rcsworks instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.467465411Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=386776 slug=rcsworks version=8 fingerprint=1c7e0c71424850e6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.467379285Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.467144625s EvaluationString:}]" duration=22.246213ms + logger=ngalert.state.manager.persist user=707607 slug=obi t=2024-05-29T13:44:13.466824216Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.566081ms + level=debug ts=2024-05-29T13:44:13.466772502Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.466759745Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=857554 slug=tripleaprod 
t=2024-05-29T13:44:13.466575433Z level=debug msg="Skip rule evaluation because it is paused" + logger=ngalert.state.manager user=349246 slug=metricgamingdev instance="DBInstanceIdentifier=tenant-1" t=2024-05-29T13:44:13.466556763Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=349246 slug=metricgamingdev instance="DBInstanceIdentifier=tenant-0" t=2024-05-29T13:44:13.466456544Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.466203672Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.466092357Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.46600166Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.465555249Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio t=2024-05-29T13:44:13.46575297Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=134486 slug=podigee instance="hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter" t=2024-05-29T13:44:13.465722655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=134486 slug=podigee instance="hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter" t=2024-05-29T13:44:13.465695158Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.465317493Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.46560109Z caller=remote_instance_store.go:51 user=102207 slug=recogizergroup msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=938012 slug=mywifinetworks instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.465520166Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=612525 slug=adleyeview version=139 fingerprint=47e45987723fe1c6 attempt=1 now=2024-05-29T13:43:50Z t=2024-05-29T13:44:13.465444582Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C:{Var:C Labels: Value:0xc00dcfd080} D:{Var:D Labels: Value:0xc00dcfd088}] EvaluatedAt:2024-05-29 13:43:50 +0000 UTC EvaluationDuration:23.457567154s EvaluationString:[ var='C' labels={} value=0 ], [ var='D' labels={} value=0 ]}]" duration=19.917717431s + logger=ngalert.scheduler user=134486 slug=podigee version=32 fingerprint=0a63115afb01769a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.465377725Z level=debug msg="Alert rule evaluated" results="[{Instance:hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter Value:0xc015c5cde0} B:{Var:B Labels:hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter Value:0xc015c5ce28} C:{Var:C Labels:hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter 
Value:0xc015c5ce70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.464822645s EvaluationString:[ var='A' labels={hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter} value=75.69650020749758 ], [ var='B' labels={hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter} value=75.69650020749758 ], [ var='C' labels={hostname=railspodigeecache-green-fsn1-01, instance=5.75.255.117:9121, job=consul_services, service=redis_exporter} value=0 ]}]" duration=16.799295ms + level=info ts=2024-05-29T13:44:13.465425208Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.244.39:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdbhsq1zk09hcd alerts=1 + level=info ts=2024-05-29T13:44:13.465372514Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdbhsq1zk09hcd alerts=1 + level=debug ts=2024-05-29T13:44:13.465329012Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.465203909Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.465281263Z caller=remote_instance_store.go:51 user=548157 slug=kushkiprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.465282819Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=41.496282ms + level=debug ts=2024-05-29T13:44:13.465047092Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=548157 slug=kushkiprod t=2024-05-29T13:44:13.465227042Z level=debug msg="Saving alert states" count=5 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.465120371Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=548157 slug=kushkiprod instance="DBInstanceIdentifier=application-autoscaling-ec938a02-4bad-41cb-93c6-4839d3986a2e" t=2024-05-29T13:44:13.465088021Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=548157 slug=kushkiprod instance="DBInstanceIdentifier=alpha-z32-production-2" t=2024-05-29T13:44:13.464921439Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=548157 slug=kushkiprod t=2024-05-29T13:44:13.464783447Z level=debug msg="State manager processing evaluation results" resultCount=5 + level=debug ts=2024-05-29T13:44:13.464708968Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.464804705Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.003977ms + level=debug ts=2024-05-29T13:44:13.464747444Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.464710066Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.464523846Z caller=remote_instance_store.go:51 user=456946 
slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=553581 slug=johnlewispreprod t=2024-05-29T13:44:13.4641583Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.59616ms + level=debug ts=2024-05-29T13:44:13.46400908Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.464004994Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.463920359Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=845543 slug=deliveryhero t=2024-05-29T13:44:13.463894787Z level=debug msg="State manager processing evaluation results" resultCount=2 + logger=ngalert.scheduler user=845543 slug=deliveryhero version=11 fingerprint=998ddb05200d581a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.463748664Z level=debug msg="Alert rule evaluated" results="[{Instance:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-auth-user, dimension_Resource=backoffice-prod-compensation-service-auth-user, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-auth-user, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-auth-user, dimension_Resource=backoffice-prod-compensation-service-auth-user, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-auth-user, region=eu-west-2 Value:0xc078202d50} B:{Var:B Labels:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-auth-user, dimension_Resource=backoffice-prod-compensation-service-auth-user, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-auth-user, region=eu-west-2 Value:0xc078202d90} C:{Var:C Labels:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-auth-user, dimension_Resource=backoffice-prod-compensation-service-auth-user, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-auth-user, region=eu-west-2 Value:0xc078202de0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.463313491s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-auth-user, dimension_Resource=backoffice-prod-compensation-service-auth-user, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-auth-user, region=eu-west-2} value=0 ], [ var='B' labels={account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-auth-user, dimension_Resource=backoffice-prod-compensation-service-auth-user, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-auth-user, region=eu-west-2} value=0 ], [ var='C' labels={account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-auth-user, dimension_Resource=backoffice-prod-compensation-service-auth-user, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-auth-user, region=eu-west-2} value=0 ]} {Instance:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-process, dimension_Resource=backoffice-prod-compensation-service-process, 
name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-process, region=eu-west-2 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-process, dimension_Resource=backoffice-prod-compensation-service-process, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-process, region=eu-west-2 Value:0xc078202e80} B:{Var:B Labels:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-process, dimension_Resource=backoffice-prod-compensation-service-process, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-process, region=eu-west-2 Value:0xc078202ec0} C:{Var:C Labels:account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-process, dimension_Resource=backoffice-prod-compensation-service-process, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-process, region=eu-west-2 Value:0xc078202f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.463330091s EvaluationString:[ var='A' labels={account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-process, dimension_Resource=backoffice-prod-compensation-service-process, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-process, region=eu-west-2} value=16 ], [ var='B' labels={account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-process, dimension_Resource=backoffice-prod-compensation-service-process, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-process, region=eu-west-2} value=16 ], [ var='C' labels={account_id=457710302499, dimension_FunctionName=backoffice-prod-compensation-service-process, dimension_Resource=backoffice-prod-compensation-service-process, name=arn:aws:lambda:eu-west-2:457710302499:function:backoffice-prod-compensation-service-process, region=eu-west-2} value=0 ]}]" duration=18.43042ms + level=debug ts=2024-05-29T13:44:13.463721892Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.463334226Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=112387 slug=lucidhq t=2024-05-29T13:44:13.463268892Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=112387 slug=lucidhq instance="datasource_uid=grafanacloud-prom, ref_id=C" t=2024-05-29T13:44:13.463203863Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.46312973Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.462329301Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.462331076Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.46231929Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.462247688Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo 
t=2024-05-29T13:44:13.462215073Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=612525 slug=adleyeview t=2024-05-29T13:44:13.462129975Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=612525 slug=adleyeview instance= t=2024-05-29T13:44:13.462110274Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=245291 slug=pismo version=2 fingerprint=7ad824559a9d2ec5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.462138597Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.461967029s EvaluationString:}]" duration=435.012668ms + level=debug ts=2024-05-29T13:44:13.461841828Z caller=remote_instance_store.go:51 user=31632 slug=fastjp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.461793481Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=31632 slug=fastjp instance= t=2024-05-29T13:44:13.461783527Z level=warn msg="Failed to take an image" dashboard=0qoZYdVGz panel=38 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.461591251Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.461685986Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.461653791Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.461427848Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.461379348Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=513734 slug=rgitsolutions t=2024-05-29T13:44:13.461318233Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=513734 slug=rgitsolutions instance="device=tmpfs, fstype=tmpfs, instance=localhost:9100, job=node_exporter, mountpoint=/run/lock, origin_prometheus=dev_marktwahn" t=2024-05-29T13:44:13.461291219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=513734 slug=rgitsolutions instance="device=/dev/sda1, fstype=ext4, instance=localhost:9100, job=node_exporter, mountpoint=/, origin_prometheus=dev_marktwahn" t=2024-05-29T13:44:13.461204031Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=513734 slug=rgitsolutions t=2024-05-29T13:44:13.46113653Z level=debug msg="State manager processing evaluation results" resultCount=3 + level=debug ts=2024-05-29T13:44:13.460958289Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=31632 slug=fastjp t=2024-05-29T13:44:13.460964733Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=310637 slug=notino instance="datasource_uid=grafanacloud-logs, ref_id=main-menu fragment,navigation fragment" t=2024-05-29T13:44:13.460937444Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=310637 slug=notino 
instance="datasource_uid=grafanacloud-logs, ref_id=main-menu fragment,navigation fragment" t=2024-05-29T13:44:13.460929047Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.460852572Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=310637 slug=notino t=2024-05-29T13:44:13.460893477Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.460800517Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=715708 slug=ggiprod t=2024-05-29T13:44:13.460739331Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.460768084Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=715708 slug=ggiprod t=2024-05-29T13:44:13.46066273Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.460492937Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.460476182Z caller=remote_rule_evaluator.go:193 user=456850 slug=juniz msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=408060 slug=fallenangel2011901 instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.460481095Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=408060 slug=fallenangel2011901 t=2024-05-29T13:44:13.460294764Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=390300 slug=astrachain t=2024-05-29T13:44:13.460146672Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=390300 slug=astrachain instance="datasource_uid=o1ErKYq7z, ref_id=A" t=2024-05-29T13:44:13.460115631Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=info ts=2024-05-29T13:44:13.459936404Z caller=remote_alert_sender.go:94 user=143430 slug=realmfive host=realmfive-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.222.42:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=r5BHyqdnz alerts=1 + level=debug ts=2024-05-29T13:44:13.459847653Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458971731Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458956175Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:13.458935606Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.767518ms + level=debug ts=2024-05-29T13:44:13.45890587Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458778561Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458659153Z 
caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458548618Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458568177Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458544042Z caller=remote_instance_store.go:51 user=267723 slug=niubits msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=267723 slug=niubits instance="datasource_uid=f94b6633-1956-4944-a114-96eea3155b35, ref_id=A" t=2024-05-29T13:44:13.45846969Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=267723 slug=niubits instance="datasource_uid=f94b6633-1956-4944-a114-96eea3155b35, ref_id=A" t=2024-05-29T13:44:13.458457163Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=267723 slug=niubits t=2024-05-29T13:44:13.458412893Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.45843961Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=267723 slug=niubits version=1 fingerprint=e6d6cb110710b606 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.45830979Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=f94b6633-1956-4944-a114-96eea3155b35, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.457944043s EvaluationString:}]" duration=13.504222ms + level=debug ts=2024-05-29T13:44:13.458236511Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458216704Z caller=remote_image_capturer.go:33 user=504517 slug=mohdkhairi rule_org_id=1 rule_uid=Uzmcz3fVk msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=504517 slug=mohdkhairi instance= t=2024-05-29T13:44:13.458182864Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.458133897Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.458176288Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.458152387Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.458101248Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458110787Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.458071524Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.457834183Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.457862262Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.45767824Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=705828 slug=tempoplatform t=2024-05-29T13:44:13.457549547Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.052702ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-loveland, daemonset=kube-flannel, env=sandbox, namespace=kube-system" t=2024-05-29T13:44:13.457492026Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.457393818Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.457369437Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=143430 slug=realmfive t=2024-05-29T13:44:13.457291837Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.058709ms + level=debug ts=2024-05-29T13:44:13.457325001Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.457219832Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.457107773Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.456992391Z caller=remote_alert_sender.go:94 user=656459 slug=activeport host=activeport-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.164.20.114:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=fd8f792a-eb05-441b-8593-ee65eb724457 alerts=1 + logger=ngalert.state.manager.persist user=697570 slug=carroteco t=2024-05-29T13:44:13.456936545Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.989489ms + level=debug ts=2024-05-29T13:44:13.456872547Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=93308 slug=cede t=2024-05-29T13:44:13.456928992Z level=debug msg="Saving alert states" count=76 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.456945317Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=679865 slug=jasonshugart t=2024-05-29T13:44:13.456774389Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=679865 slug=jasonshugart instance="__name__=homeassistant_sensor_unit_celsius, domain=sensor, entity=sensor.freezer_temp, friendly_name=Freezer Temp, instance=pi5, job=integrations/hass" t=2024-05-29T13:44:13.456752048Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye 
instance="atlas_agent_name=us-foods-loveland-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.456759407Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.456667172Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-loveland-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.456672053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=staging, fstype=xfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-swarm-node-213;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.45661257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=679865 slug=jasonshugart t=2024-05-29T13:44:13.456536676Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=staging, fstype=xfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-swarm-node-213;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.456596808Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-loveland-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.45655504Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.456464962Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.45645127Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=staging, fstype=xfs, host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-swarm-node-211;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.456408224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.456331194Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=58.462115ms + level=debug ts=2024-05-29T13:44:13.456324026Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:13.45620076Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=63.146416ms + level=debug ts=2024-05-29T13:44:13.456301885Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, 
environment=staging, fstype=xfs, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-swarm-fs-210;mode=rw;path=-swarm_volumes;service=sms A, path=-swarm_volumes, service=sms" t=2024-05-29T13:44:13.456312519Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=staging, fstype=xfs, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-swarm-fs-210;mode=rw;path=-swarm_volumes;service=sms A, path=-swarm_volumes, service=sms" t=2024-05-29T13:44:13.456302891Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=127813 slug=clearsale t=2024-05-29T13:44:13.456237541Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=48.652913ms + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=staging, fstype=xfs, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-stats-instance-190;mode=rw;path=-data;service=sms A, path=-data, service=sms" t=2024-05-29T13:44:13.456235261Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.456119985Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=84824 slug=factmata instance= t=2024-05-29T13:44:13.456156353Z level=warn msg="Failed to take an image" dashboard=ydYs_kMZk panel=12 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.455984659Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=staging, fstype=xfs, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-mysql-instance-183;mode=rw;path=-var-lib-mysql;service=sms A, path=-var-lib-mysql, service=sms" t=2024-05-29T13:44:13.456032412Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.45600533Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.455955295Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.455881434Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=628944 slug=dragonflydb instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.455668152Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.455768662Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.455663831Z level=debug 
msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.455562542Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=staging, fstype=xfs, host=staging-zh-backend-instance-202, mode=rw, name=disk.used_percent;device=vdb;environment=staging;fstype=xfs;host=staging-zh-backend-instance-202;mode=rw;path=-edx-var-mongo-mongodb;service=sms A, path=-edx-var-mongo-mongodb, service=sms" t=2024-05-29T13:44:13.455623855Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=61472 slug=glasslewis version=1 fingerprint=62f0ca531636451f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.455522744Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.455204805s EvaluationString:}]" duration=42.287556ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, daemonset=kube-proxy, env=office, namespace=kube-system" t=2024-05-29T13:44:13.455565007Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-219;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.455522632Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-218;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.455438407Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=682586 slug=nielsonquea instance= t=2024-05-29T13:44:13.455433212Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.scheduler user=682586 slug=nielsonquea version=4 fingerprint=c2a1a0b00144fd26 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.455289449Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Alerting Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc001e3cde8} C:{Var:C Labels: Value:0xc001e3cdf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.45492472s EvaluationString:[ var='B' labels={} value=56.59863945452104 ], [ var='C' labels={} value=1 ]}]" duration=1.947765ms + logger=ngalert.state.manager user=84824 slug=factmata t=2024-05-29T13:44:13.455338871Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-217;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.455345563Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.45528318Z level=debug msg="Keeping state" state=Normal 
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.455270197Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-215;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.455159176Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-215;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.455145642Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.454959071Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.454969193Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.454877854Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-212;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.454861971Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-211;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.454787424Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-node-211;mode=rw;path=-var-lib-docker;service=sms A, path=-var-lib-docker, service=sms" t=2024-05-29T13:44:13.454772549Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.454649427Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-swarm-fs-210;mode=rw;path=-swarm_volumes;service=sms A, path=-swarm_volumes, service=sms" 
t=2024-05-29T13:44:13.45469259Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.454603292Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.454438298Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.454265851Z caller=remote_instance_store.go:51 user=263317 slug=ymc msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=707607 slug=obi t=2024-05-29T13:44:13.454253315Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-matomo-192;mode=rw;path=-data;service=sms A, path=-data, service=sms" t=2024-05-29T13:44:13.454217882Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.454248277Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=263317 slug=ymc t=2024-05-29T13:44:13.454208928Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=263317 slug=ymc instance= t=2024-05-29T13:44:13.454176302Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vdb, environment=campus, fstype=xfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=vdb;environment=campus;fstype=xfs;host=campus-zh-matomo-192;mode=rw;path=-data;service=sms A, path=-data, service=sms" t=2024-05-29T13:44:13.454202381Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.454166001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=263317 slug=ymc t=2024-05-29T13:44:13.454101343Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=707607 slug=obi t=2024-05-29T13:44:13.454182193Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=707607 slug=obi version=1 fingerprint=91bc5adaf5a956c5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.454136573Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=2.928589ms + level=error ts=2024-05-29T13:44:13.454107243Z caller=remote_rule_evaluator.go:110 user=707607 slug=obi msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + level=debug ts=2024-05-29T13:44:13.453993849Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, daemonset=kube-proxy, env=office, namespace=kube-system" t=2024-05-29T13:44:13.453966582Z 
level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.453903816Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.453877865Z caller=remote_instance_store.go:51 user=287424 slug=ercande msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.453801827Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.453783716Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=287424 slug=ercande instance= t=2024-05-29T13:44:13.453772053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.453689602Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=437877 slug=justwilliam t=2024-05-29T13:44:13.453679524Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.453678539Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=247816 slug=adelaideitk t=2024-05-29T13:44:13.453581735Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.741306ms + logger=ngalert.state.manager user=287424 slug=ercande t=2024-05-29T13:44:13.453612712Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, daemonset=kube-proxy, env=office, namespace=kube-system" t=2024-05-29T13:44:13.453589522Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.453457609Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.453443423Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, 
daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.453441764Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.453304799Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.453218318Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.453251177Z level=debug msg="Keeping state" state=Normal + level=debug component=discovery ts=2024-05-29T13:44:13.453189157Z caller=hgapi.go:100 msg="requesting a list of instances from the hg api" url="http://hosted-grafana-api.hosted-grafana.svc.cluster.local/instances?status=active" + level=debug ts=2024-05-29T13:44:13.453110788Z caller=ruler.go:426 msg="syncing rules" reason=periodic + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.453040938Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.45298887Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-veridas-db, env=au" t=2024-05-29T13:44:13.453033349Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.452939787Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.452935342Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard1, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.452741145Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-v4monitoring-db, env=au" t=2024-05-29T13:44:13.452738861Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-sar-investigation-db, env=au" t=2024-05-29T13:44:13.452529984Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.45250365Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard1, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.452568821Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard1, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.452554398Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-kansascity, daemonset=kube-proxy, env=office, namespace=kube-system" t=2024-05-29T13:44:13.45248689Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-kansascity, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.452414044Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.452366692Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-kansascity, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.452401546Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-kansascity, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.452212936Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.452179382Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=343338 slug=f5sdc t=2024-05-29T13:44:13.452201332Z level=debug msg="Saving alert states" count=30 max_state_save_concurrency=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-periodic-reviews-db, env=au" t=2024-05-29T13:44:13.452076258Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=tr2.tor, group=monitoring, instance=mon-poller.tr2.tor, origin=volterra-infra-vm" t=2024-05-29T13:44:13.452116807Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.45204364Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.452025072Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sv10.sjc, group=monitoring, instance=mon-poller.sv10.sjc, origin=volterra-infra-vm" t=2024-05-29T13:44:13.452062761Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=sif.che, group=monitoring, instance=mon-poller.sif.che, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451974477Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa4.par, group=monitoring, instance=mon-poller.pa4.par, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451904941Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-hendersonbuilding2, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.451902177Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-instant-id-qa-db, env=au" t=2024-05-29T13:44:13.451817751Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.451845786Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, group=monitoring, instance=mon-poller-amer-02.pa2.par, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451800971Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-hendersonbuilding2, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.451785814Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=pa2.par, group=monitoring, instance=mon-poller-amer-01.pa2.par, origin=volterra-infra-vm" t=2024-05-29T13:44:13.45175875Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=os1.osa, group=monitoring, instance=mon-poller.os1.osa, origin=volterra-infra-vm" t=2024-05-29T13:44:13.45172404Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.451704354Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.451613693Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, group=monitoring, instance=mon-poller.mtl7.mon, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451667939Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mtl7.mon, group=monitoring, instance=mon-poller.mtl7.mon, origin=volterra-infra-vm" t=2024-05-29T13:44:13.45165856Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, group=monitoring, instance=mon-poller.me1.mel, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451645667Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.451552542Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=me1.mel, group=monitoring, instance=mon-poller.me1.mel, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451639027Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-idverse-enterprise-db, env=au" t=2024-05-29T13:44:13.451642134Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=md2.mad, group=monitoring, instance=mon-poller.md2.mad, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451622595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.451508592Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.451483401Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.451477851Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=mb2.mum, group=monitoring, instance=mon-poller.mb2.mum, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451585794Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.451577593Z caller=remote_instance_store.go:51 user=646202 slug=kairosaerospace msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.451452061Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=ls1.lis, group=monitoring, instance=mon-poller.ls1.lis, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451551942Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-hendersonbuilding1, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.451571956Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-frontend-log-db, env=au" t=2024-05-29T13:44:13.451509797Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.45140114Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.45139383Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.45138373Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-hendersonbuilding1, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.451483133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=646202 slug=kairosaerospace instance= t=2024-05-29T13:44:13.45135844Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, 
name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.451430283Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=646202 slug=kairosaerospace t=2024-05-29T13:44:13.451310239Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=343338 slug=f5sdc instance="datacenter=dc12.ash, group=monitoring, instance=mon-poller.dc12.ash, origin=volterra-infra-vm" t=2024-05-29T13:44:13.451382217Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.451295555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.451212747Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=nordic-kansascity, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.451164361Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.451113572Z caller=remote_instance_store.go:51 user=692010 slug=mercariusprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.451116562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.4510997Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.450992296Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.451008188Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=nordic-kansascity, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.450980227Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-dow-jones-db, 
env=au" t=2024-05-29T13:44:13.450882548Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-dow-jones-db, env=au" t=2024-05-29T13:44:13.450853552Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=85680 slug=roivolution instance= t=2024-05-29T13:44:13.450893988Z level=warn msg="Failed to take an image" dashboard=_mpndSGZk panel=17 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.45086229Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-watsonvillehilltop, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.450845663Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.450809326Z caller=remote_instance_store.go:51 user=96994 slug=loblawdigital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.450853017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.450827914Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=96994 slug=loblawdigital instance= t=2024-05-29T13:44:13.450756972Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=96994 slug=loblawdigital instance= t=2024-05-29T13:44:13.450746741Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.45070662Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.450762097Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms" t=2024-05-29T13:44:13.450702324Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-watsonvillehilltop, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.450518867Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-creditsafe-db, env=au" t=2024-05-29T13:44:13.450487988Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-creditsafe-db, env=au" t=2024-05-29T13:44:13.450473476Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 
slug=crossnokaye instance="atlas_agent_name=lineage-watsonvillehilltop, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.450488731Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=432323 slug=lithic version=3 fingerprint=83ad113282894635 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.450322337Z level=debug msg="Alert rule evaluated" results="[{Instance:DBClusterIdentifier=prod-journal-processor-cluster State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:DBClusterIdentifier=prod-journal-processor-cluster Value:0xc01a776498} C:{Var:C Labels:DBClusterIdentifier=prod-journal-processor-cluster Value:0xc01a7764a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.449992818s EvaluationString:[ var='B' labels={DBClusterIdentifier=prod-journal-processor-cluster} value=5.741666666666666 ], [ var='C' labels={DBClusterIdentifier=prod-journal-processor-cluster} value=0 ]}]" duration=103.170267ms + level=debug ts=2024-05-29T13:44:13.450288024Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-comply-advantage-db, env=au" t=2024-05-29T13:44:13.450292476Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.450286001Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.665014ms + logger=ngalert.state.manager user=93308 slug=cede instance="device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.450150426Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=85680 slug=roivolution instance= t=2024-05-29T13:44:13.450143614Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-compliance-lens-db, env=au" t=2024-05-29T13:44:13.450061496Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=191103 slug=amazonadmin instance= t=2024-05-29T13:44:13.449953062Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.449937133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.449856714Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.449773932Z caller=remote_instance_store.go:51 user=173175 slug=quodorbis msg="calling SaveAlertInstance" + 
logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-watsonville-walker-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.449773606Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.44973435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=173175 slug=quodorbis t=2024-05-29T13:44:13.449735084Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=93308 slug=cede instance="device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.449654486Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede instance="device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.449642996Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-backend-db-read-replica-1, env=au" t=2024-05-29T13:44:13.449631221Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-watsonville-cascade-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.449605289Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-watsonville-cascade-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.449539961Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-backend-db, env=au" t=2024-05-29T13:44:13.449495993Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=syd-prod-backend-db, env=au" t=2024-05-29T13:44:13.449479128Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-watsonville-cascade-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.449454454Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-watsonville-cascade-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.449368846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=93308 slug=cede 
instance="device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.449339793Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede instance="device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms" t=2024-05-29T13:44:13.449244658Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.449214395Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.449125999Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.449129744Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-sharon, daemonset=kube-proxy, env=office, namespace=kube-system" t=2024-05-29T13:44:13.449152764Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-sharon, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.448988414Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=93308 slug=cede t=2024-05-29T13:44:13.448719428Z level=debug msg="State manager processing evaluation results" resultCount=76 + logger=ngalert.state.manager.persist user=799774 slug=sagacitysoft t=2024-05-29T13:44:13.448850879Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-sharon, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.448910652Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=799774 slug=sagacitysoft instance="agent_hostname=app-sagacitysoft-ca, device=/dev/nvme0n1p1, fstype=ext4, instance=prod-ec2, job=integrations/node_exporter, mountpoint=/" t=2024-05-29T13:44:13.448833669Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-sharon, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.448767286Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=93308 slug=cede version=83 fingerprint=3a63466024470276 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.446649566Z level=debug msg="Alert rule evaluated" results="[{Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-matomo-192, mode=rw, 
name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989378} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc0469893d0} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441586497s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.2706400487351822 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.002706400487351822 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989490} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc0469894f8} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989600}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441605424s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.07084748247135562 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, 
host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0007084748247135563 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc0469896c0} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989710} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989760}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441614497s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.18811228104463393 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0018811228104463392 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc0469898e0} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, 
host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989a20} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441621474s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.18371485109813596 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0018371485109813595 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989c10} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989b40} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989ba0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441630385s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.18786797938093958 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], 
[ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0018786797938093957 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-215;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-215;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989d30} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-215;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989ed0} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-215;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc046989fa0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441637639s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-215;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.1846920577529133 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-215;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-215, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-215;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0018469205775291332 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc039894360} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc0398944c0} 
D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc039894678}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441643655s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.18444775608921896 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-216, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-216;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0018444775608921896 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc039894930} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc039894b58} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc039895200}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441651422s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.19031099601788287 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-217, mode=rw, 
name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-217;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0019031099601788286 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc0398958e0} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc039895cb0} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441658182s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.18860088437202258 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-218, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-218;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0018860088437202258 ]} {Instance:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94160} C:{Var:C Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94260} D:{Var:D Labels:device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, 
name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb940f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441664711s EvaluationString:[ var='B' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.1895780910267999 ], [ var='C' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=campus, fstype=nsfs, host=campus-zh-swarm-node-219, mode=rw, name=disk.used_percent;device=nsfs;environment=campus;fstype=nsfs;host=campus-zh-swarm-node-219;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.001895780910267999 ]} {Instance:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94388} C:{Var:C Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb943f0} D:{Var:D Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441677148s EvaluationString:[ var='B' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.5430566330488751 ], [ var='C' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-matomo-192, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-matomo-192;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0054305663304887505 ]} {Instance:device=nsfs, environment=staging, fstype=nsfs, 
host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94538} C:{Var:C Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb945a0} D:{Var:D Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb944c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441683519s EvaluationString:[ var='B' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.12263943517455354 ], [ var='C' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-211, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-211;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0012263943517455355 ]} {Instance:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb946b0} C:{Var:C Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94788} D:{Var:D Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94808}] EvaluatedAt:2024-05-29 
13:44:10 +0000 UTC EvaluationDuration:3.441690191s EvaluationString:[ var='B' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.1543986514548164 ], [ var='C' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-212, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-212;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.001543986514548164 ]} {Instance:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb949a8} C:{Var:C Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb948c8} D:{Var:D Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94940}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441696839s EvaluationString:[ var='B' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.15464295311851073 ], [ var='C' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-213, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-213;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.0015464295311851074 ]} {Instance:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-214, mode=rw, 
name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94a90} C:{Var:C Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94b98} D:{Var:D Labels:device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms Value:0xc00bb94c08}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441703112s EvaluationString:[ var='B' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.14511554275411712 ], [ var='C' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0 ], [ var='D' labels={device=nsfs, environment=staging, fstype=nsfs, host=staging-zh-swarm-node-214, mode=rw, name=disk.used_percent;device=nsfs;environment=staging;fstype=nsfs;host=staging-zh-swarm-node-214;mode=rw;path=-run-snapd-ns-lxd.mnt;service=sms A, path=-run-snapd-ns-lxd.mnt, service=sms} value=0.001451155427541171 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb94cd0} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb94d40} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb94da8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441710142s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, 
name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms} value=24.651717104461195 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.24651717104461196 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb94f00} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb94fa0} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95000}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441716721s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms} value=22.531737987831175 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-balancer-101, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-balancer-101;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.22531737987831174 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb950e8} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95150} D:{Var:D Labels:device=vda1, environment=campus, 
fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb951c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441733264s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms} value=44.88028938694848 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.44880289386948485 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95370} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95400} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95458}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441738487s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=42.63153170349642 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.4263153170349642 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, 
name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95560} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95620} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb956d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441749105s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms} value=33.69775561909673 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.3369775561909673 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95788} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95800} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441755538s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms} value=38.92110599366746 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, 
host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.3892110599366746 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95940} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb959b0} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441763201s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms} value=33.75401677889964 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.3375401677889964 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95bf0} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95c68} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-stats-instance-190, mode=rw, 
name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95b90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441769071s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms} value=52.36223429237173 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.5236223429237173 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95e60} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95d30} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441774932s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms} value=19.68895860678042 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.1968895860678042 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, 
name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e0b8} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc00bb95fd0} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e040}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441780797s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=45.092763923125354 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.45092763923125356 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e240} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e298} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e328}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441789106s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=34.97550981208183 ], [ 
var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.34975509812081834 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e5f8} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e4d8} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e570}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441796317s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=35.94540662248829 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.3594540662248829 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, 
path=-, service=sms Value:0xc01b96e810} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e890} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96e790}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441806849s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=35.46872832431625 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-214, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-214;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.3546872832431625 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96ea40} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96eab8} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96eb18}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441813311s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=35.494979244699124 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, 
name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-215, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-215;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.35494979244699126 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-216, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-216;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-216, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-216;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96ee48} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-216, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-216;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96ed00} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-216, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-216;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96ed90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441822448s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-216, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-216;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=35.616405320471436 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-216, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-216;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-216, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-216;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.35616405320471434 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-217, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-217;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-217, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-217;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f010} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, 
host=campus-zh-swarm-node-217, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-217;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f088} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-217, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-217;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f120}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.44182995s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-217, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-217;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=36.336835141148285 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-217, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-217;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-217, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-217;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.3633683514114828 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-218, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-218;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-218, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-218;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f2c8} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-218, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-218;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f368} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-218, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-218;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f3e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441836237s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-218, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-218;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=34.73695892267389 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-218, label=cloudimg-rootfs, mode=rw, 
name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-218;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-218, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-218;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.34736958922673894 ]} {Instance:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-219, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-219;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-219, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-219;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f610} C:{Var:C Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-219, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-219;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f680} D:{Var:D Labels:device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-219, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-219;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f598}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441844203s EvaluationString:[ var='B' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-219, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-219;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=28.40752179297616 ], [ var='C' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-219, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-219;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=campus, fstype=ext4, host=campus-zh-swarm-node-219, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=campus;fstype=ext4;host=campus-zh-swarm-node-219;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.2840752179297616 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-backend-instance-202, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-backend-instance-202;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-backend-instance-202, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-backend-instance-202;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f800} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-backend-instance-202, mode=rw, 
name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-backend-instance-202;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f870} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-backend-instance-202, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-backend-instance-202;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f8c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4418522s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-backend-instance-202, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-backend-instance-202;mode=rw;path=-;service=sms A, path=-, service=sms} value=23.468725984117498 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-backend-instance-202, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-backend-instance-202;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-backend-instance-202, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-backend-instance-202;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.23468725984117497 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96f9b0} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96fa08} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96fb10}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441857659s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms} value=23.81526829710343 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-balancer-100, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-balancer-100;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.23815268297103429 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, 
name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96fbf0} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96fc50} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96fca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441863383s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms} value=41.96111297136206 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-clickhouse-195, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-clickhouse-195;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.41961112971362063 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96fe80} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96ff08} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc01b96ff80}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441868875s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, 
path=-, service=sms} value=37.287012870437195 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-matomo-192, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-matomo-192;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.372870128704372 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc500f0} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50170} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc501c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441876323s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms} value=40.5438465078689 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-182, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-182;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.40543846507868897 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50390} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, 
name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc502a0} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50320}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.44188219s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms} value=42.40574078533847 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-183, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-183;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.42405740785338475 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50450} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc504c8} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50548}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441887442s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms} value=42.57699846333033 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-mysql-instance-184, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-mysql-instance-184;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.4257699846333033 ]} {Instance:device=vda1, environment=staging, fstype=ext4, 
host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc506d8} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50608} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50670}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441893167s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms} value=62.938770331227346 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-stats-instance-190, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-stats-instance-190;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.6293877033122734 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc507b8} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50828} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc508b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441899362s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms} value=18.995416768672637 ], [ var='C' labels={device=vda1, environment=staging, 
fstype=ext4, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-fs-210, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-fs-210;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.18995416768672638 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50a50} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50ad8} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc509c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441904537s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=38.95769293833374 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-211, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-211;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.38957692938333743 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50be8} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, 
host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50c58} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50d40}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441912945s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=44.5578316366941 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0 ], [ var='D' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-212, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-212;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=0.44557831636694095 ]} {Instance:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc51068} C:{Var:C Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50f48} D:{Var:D Labels:device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms Value:0xc03fc50fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441920961s EvaluationString:[ var='B' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, name=disk.used_percent;device=vda1;environment=staging;fstype=ext4;host=staging-zh-swarm-node-213;label=cloudimg-rootfs;mode=rw;path=-;service=sms A, path=-, service=sms} value=42.04529202653312 ], [ var='C' labels={device=vda1, environment=staging, fstype=ext4, host=staging-zh-swarm-node-213, label=cloudimg-rootfs, mode=rw, 
[Elided: several thousand lines of Grafana `ngalert` scheduler debug output pasted verbatim. The log consists of "Alert rule evaluated" result dumps for the `disk.used_percent` metric across `campus` and `staging` hosts (every instance in state Normal, evaluated at 2024-05-29 13:44:10 UTC with evaluation durations around 3.44s), interleaved with `ngalert.state.manager` entries ("Setting next state", "Keeping state", "Saving alert states") and `remote_instance_store.go:51` "calling SaveAlertInstance" messages for various tenants. The dump ends mid-message and carries no information relevant to the documentation changes in this diff.]
prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0 Value:0xc0519b0ad0} C:{Var:C Labels:__name__=cronjob_status, env=prod, instance=192.168.25.230:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0 Value:0xc0519b09a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.446916993s EvaluationString:[ var='A' labels={__name__=cronjob_status, env=prod, instance=192.168.25.230:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0} value=1 ], [ var='B' labels={__name__=cronjob_status, env=prod, instance=192.168.25.230:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0} value=1 ], [ var='C' labels={__name__=cronjob_status, env=prod, instance=192.168.25.230:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0} value=0 ]} {Instance:__name__=cronjob_status, env=test, instance=192.168.31.21:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=cronjob_status, env=test, instance=192.168.31.21:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0 Value:0xc0519b0c08} B:{Var:B Labels:__name__=cronjob_status, env=test, instance=192.168.31.21:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0 Value:0xc0519b0cc0} C:{Var:C Labels:__name__=cronjob_status, env=test, instance=192.168.31.21:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0 Value:0xc0519b0d28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.446937165s EvaluationString:[ var='A' labels={__name__=cronjob_status, env=test, instance=192.168.31.21:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0} value=1 ], [ var='B' labels={__name__=cronjob_status, env=test, instance=192.168.31.21:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0} value=1 ], [ var='C' labels={__name__=cronjob_status, env=test, instance=192.168.31.21:8700, job=cronjob-exporter, name=psql-backups-28615710, namespace=backups, prometheus=monitoring/kube-prometheus-prometheus, prometheus_replica=prometheus-kube-prometheus-prometheus-0} value=0 ]}]" duration=16.802179ms + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.447368642Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:13.447297172Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.447343968Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.447250161Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=114492 slug=railsbank version=1 fingerprint=d220976dcd9cba72 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.4471954Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.446923258s EvaluationString:}]" duration=125.743615ms + logger=ngalert.state.manager user=174054 slug=netrading instance="__name__=duplicate_order_gauge, cluster=DEV, instance=10.136.6.214:9090, job=prometheus.scrape.annotation_autodiscovery_http" t=2024-05-29T13:44:13.447252484Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=174054 slug=netrading instance="__name__=duplicate_order_gauge, app=day-ahead-trade-api, azure_workload_identity_use=true, cluster=DEV, container=day-ahead-trade-api, criticality=critical_within_working_hours, env=dev, imageversion=2024.05.28-06.16.31-aa9865f3-dockerfile-968f11f8, instance=day-ahead-trade-api-5b95f895c8-sjfz7:day-ahead-trade-api:http-metrics, job=frontoffice-powertrading-tradeautomation/day-ahead-trade-api, name=day-ahead-trade-api, namespace=frontoffice-powertrading-tradeautomation, owningteam=curvepower, pod=day-ahead-trade-api-5b95f895c8-sjfz7, pod_template_hash=5b95f895c8, rabbitmq=0" t=2024-05-29T13:44:13.447227562Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=174054 slug=netrading instance="__name__=duplicate_order_gauge, app=day-ahead-trade-api, azure_workload_identity_use=true, cluster=DEV, container=day-ahead-trade-api, criticality=critical_within_working_hours, env=dev, imageversion=2024.05.28-06.16.31-aa9865f3-dockerfile-968f11f8, instance=day-ahead-trade-api-5b95f895c8-sjfz7:day-ahead-trade-api:http-metrics, job=frontoffice-powertrading-tradeautomation/day-ahead-trade-api, name=day-ahead-trade-api, namespace=frontoffice-powertrading-tradeautomation, owningteam=curvepower, pod=day-ahead-trade-api-5b95f895c8-sjfz7, pod_template_hash=5b95f895c8, rabbitmq=0" t=2024-05-29T13:44:13.447211939Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.447178393Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=sing-prod-creditsafe-db, env=apac" t=2024-05-29T13:44:13.447084128Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=174054 slug=netrading version=4 fingerprint=0e2684225b30919d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.447050262Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=duplicate_order_gauge, app=day-ahead-trade-api, azure_workload_identity_use=true, cluster=DEV, container=day-ahead-trade-api, criticality=critical_within_working_hours, env=dev, imageversion=2024.05.28-06.16.31-aa9865f3-dockerfile-968f11f8, instance=day-ahead-trade-api-5b95f895c8-sjfz7:day-ahead-trade-api:http-metrics, job=frontoffice-powertrading-tradeautomation/day-ahead-trade-api, 
name=day-ahead-trade-api, namespace=frontoffice-powertrading-tradeautomation, owningteam=curvepower, pod=day-ahead-trade-api-5b95f895c8-sjfz7, pod_template_hash=5b95f895c8, rabbitmq=0 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=duplicate_order_gauge, app=day-ahead-trade-api, azure_workload_identity_use=true, cluster=DEV, container=day-ahead-trade-api, criticality=critical_within_working_hours, env=dev, imageversion=2024.05.28-06.16.31-aa9865f3-dockerfile-968f11f8, instance=day-ahead-trade-api-5b95f895c8-sjfz7:day-ahead-trade-api:http-metrics, job=frontoffice-powertrading-tradeautomation/day-ahead-trade-api, name=day-ahead-trade-api, namespace=frontoffice-powertrading-tradeautomation, owningteam=curvepower, pod=day-ahead-trade-api-5b95f895c8-sjfz7, pod_template_hash=5b95f895c8, rabbitmq=0 Value:0xc0243b3128} B:{Var:B Labels: Value:0xc0243b3140}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.446619161s EvaluationString:[ var='A' labels={__name__=duplicate_order_gauge, app=day-ahead-trade-api, azure_workload_identity_use=true, cluster=DEV, container=day-ahead-trade-api, criticality=critical_within_working_hours, env=dev, imageversion=2024.05.28-06.16.31-aa9865f3-dockerfile-968f11f8, instance=day-ahead-trade-api-5b95f895c8-sjfz7:day-ahead-trade-api:http-metrics, job=frontoffice-powertrading-tradeautomation/day-ahead-trade-api, name=day-ahead-trade-api, namespace=frontoffice-powertrading-tradeautomation, owningteam=curvepower, pod=day-ahead-trade-api-5b95f895c8-sjfz7, pod_template_hash=5b95f895c8, rabbitmq=0} value=0 ], [ var='B' labels={} value=0 ]} {Instance:__name__=duplicate_order_gauge, cluster=DEV, instance=10.136.6.214:9090, job=prometheus.scrape.annotation_autodiscovery_http State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=duplicate_order_gauge, cluster=DEV, instance=10.136.6.214:9090, job=prometheus.scrape.annotation_autodiscovery_http Value:0xc0243b31c8} B:{Var:B Labels: Value:0xc0243b31d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.446634537s EvaluationString:[ var='A' labels={__name__=duplicate_order_gauge, cluster=DEV, instance=10.136.6.214:9090, job=prometheus.scrape.annotation_autodiscovery_http} value=0 ], [ var='B' labels={} value=0 ]}]" duration=124.782286ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside2, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.446935006Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.446837318Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.446720476Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.446666883Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside1, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.44661885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside1, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.446504697Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative 
instance="dbinstance_identifier=sing-prod-backend-db, env=apac" t=2024-05-29T13:44:13.445967716Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=399183 slug=guidion t=2024-05-29T13:44:13.445864826Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.379917ms + logger=ngalert.state.manager.persist user=697570 slug=carroteco t=2024-05-29T13:44:13.445937216Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=697570 slug=carroteco instance="QueueName=document-core-cqrs-mongo-event-bridge-dlq-707b2bd" t=2024-05-29T13:44:13.445928366Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=697570 slug=carroteco instance="QueueName=document-core-cqrs-mongo-event-bridge-dlq-707b2bd" t=2024-05-29T13:44:13.445911865Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside-building-2, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.445847146Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside-building-2, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.445833218Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=350551 slug=loopme t=2024-05-29T13:44:13.445776743Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.648164ms + level=debug ts=2024-05-29T13:44:13.445626506Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.445573865Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.445495801Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.445474844Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=707603 slug=canoneurope t=2024-05-29T13:44:13.445418559Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=679029 slug=joveoprodaws t=2024-05-29T13:44:13.445249351Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.44523346Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-riverside-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.445283542Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.445217739Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug 
ts=2024-05-29T13:44:13.445291983Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=795224 slug=gannettdigital t=2024-05-29T13:44:13.445226448Z level=debug msg="Saving alert states" count=19 max_state_save_concurrency=1 + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=wendy-ruderman, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.445197378Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.445185856Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=steve-starr, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.445138197Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.445096643Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=raquel-rutledge, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.445084947Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-rialto, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.44505754Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=loretta-tofani, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.444984846Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=homer-bigart, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.444925385Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=homer-bigart, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.444915585Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.444923154Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-rialto, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.444864399Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, 
cluster=george-perle, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.444863434Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.444849191Z caller=remote_instance_store.go:51 user=935198 slug=provable msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.444816533Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=714575 slug=bonduelleprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.444760819Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=bruce-russell, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.444679902Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=bruce-russell, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.444661312Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.444613253Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=booth-tarkington, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.444627931Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-pasadena-houston-gulf-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.444678411Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=795224 slug=gannettdigital instance="__name__=kube_deployment_spec_replicas, cluster=barbara-laker, deployment=canary, instance=prometheus-kube-state-metrics.prometheus.svc:8080, job=integrations/kubernetes/kube-state-metrics, namespace=default" t=2024-05-29T13:44:13.44455499Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.444647408Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.444655373Z caller=remote_instance_store.go:51 user=467258 slug=neonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-idverse-enterprise-db, env=us" t=2024-05-29T13:44:13.44448739Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.444481888Z caller=remote_instance_store.go:51 user=662363 slug=facephi msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.444488277Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-pasadena-houston-gulf-building-1, daemonset=cilium, 
env=production, namespace=kube-system" t=2024-05-29T13:44:13.44446252Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.444371971Z caller=remote_instance_store.go:51 user=290764 slug=inforom msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.444388323Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.444376833Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.444174811Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.444183679Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=290764 slug=inforom version=12 fingerprint=5f8022fb047fe4f8 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.444121762Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.443855486s EvaluationString:}]" duration=9.827105ms + level=debug ts=2024-05-29T13:44:13.444139211Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=236496 slug=improbable t=2024-05-29T13:44:13.44407655Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=467258 slug=neonprod version=53 fingerprint=ec51d5c7bd0915e0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.44388069Z level=debug msg="Alert rule evaluated" results="[{Instance:neon_region=eu-west-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:neon_region=eu-west-1 Value:0xc00589a110} B:{Var:B Labels:neon_region=eu-west-1 Value:0xc00589a118} C:{Var:C Labels:neon_region=eu-west-1 Value:0xc00589a170}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.443520398s EvaluationString:[ var='A' labels={neon_region=eu-west-1} value=0 ], [ var='B' labels={neon_region=eu-west-1} value=0 ], [ var='C' labels={neon_region=eu-west-1} value=0 ]} {Instance:neon_region=us-east-2 State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:neon_region=us-east-2 Value:0xc00589a330} B:{Var:B Labels:neon_region=us-east-2 Value:0xc00589a200} C:{Var:C Labels:neon_region=us-east-2 Value:0xc00589a208}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.443531748s EvaluationString:[ var='A' labels={neon_region=us-east-2} value=1 ], [ var='B' labels={neon_region=us-east-2} value=1 ], [ var='C' labels={neon_region=us-east-2} value=1 ]}]" duration=116.66184ms + level=debug ts=2024-05-29T13:44:13.443830244Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.443779717Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-4, daemonset=kube-proxy, env=production, 
namespace=kube-system" t=2024-05-29T13:44:13.443964684Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.443750377Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-4, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.44385845Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.443861699Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-frontend-db, env=us" t=2024-05-29T13:44:13.443793299Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-4, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.443762509Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-4, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.443694244Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.443666561Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.443658933Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.443653788Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.443609197Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-3, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.443601653Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=162543 slug=rapharacing t=2024-05-29T13:44:13.443479043Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.208456ms + logger=ngalert.scheduler user=245291 slug=pismo version=2 fingerprint=e52933b180096194 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.443510117Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.443213561s EvaluationString:}]" duration=481.769488ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgeo1f4c-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.442709804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-3, daemonset=collector-opentelemetry-collector-agent, 
env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.443417762Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kgeo1f4c-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.442457692Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kga3mhej-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.442365041Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kga3mhej-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.442231239Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-creditsafe-db, env=us" t=2024-05-29T13:44:13.443216077Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-creditsafe-db, env=us" t=2024-05-29T13:44:13.443200832Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:13.443330049Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=656459 slug=activeport instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:13.443306277Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.443204934Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kga3mhej-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.442124948Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.443088401Z caller=remote_instance_store.go:51 user=890273 slug=cmhusqnp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-kg81l7yp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.441433831Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kg53x7xp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.44133035Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=143430 slug=realmfive instance="datasource_uid=ETynHsFGz, ref_id=A" t=2024-05-29T13:44:13.443206901Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.44316042Z caller=remote_instance_store.go:51 user=297464 slug=wispr msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfyi3x8q-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.440825555Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=297464 slug=wispr t=2024-05-29T13:44:13.443122603Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfxrnfa5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.440555572Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.443016016Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.443041775Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.442995829Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-2, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.442960604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfpwo15n-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.439775524Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfmavznr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.439666083Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard-building-2, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.442882922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=ohio-prod-compliance-lens-db, env=us" t=2024-05-29T13:44:13.442792072Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard, daemonset=kube-proxy, env=sandbox, namespace=kube-system" t=2024-05-29T13:44:13.442776105Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.442727799Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard, daemonset=kube-proxy, env=office, namespace=kube-system" t=2024-05-29T13:44:13.442672859Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.442682506Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=288032 slug=dapperlabssre instance="datasource_uid=lyVfh3DVz, ref_id=A" t=2024-05-29T13:44:13.442652636Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.scheduler user=288032 slug=dapperlabssre version=1 fingerprint=dfcbc9cd1124d69c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.442564865Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=lyVfh3DVz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.44226152s EvaluationString:}]" duration=31.455353ms + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.442547424Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=16.169444ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.442487027Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.442472607Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard, daemonset=collector-opentelemetry-collector-agent, env=sandbox, namespace=open-telemetry" t=2024-05-29T13:44:13.442372128Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.442144785Z caller=remote_instance_store.go:51 user=60121 slug=eddieparis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=60121 slug=eddieparis 
t=2024-05-29T13:44:13.442078822Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=60121 slug=eddieparis version=1 fingerprint=096f837b5f9bee2b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.442019096Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441755824s EvaluationString:}]" duration=2.226253923s + level=debug ts=2024-05-29T13:44:13.442102346Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.442051857Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-oxnard, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.442038509Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.442021392Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.441887536Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=8647ec6f02b2bd18 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.441806183Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.441597064s EvaluationString:}]" duration=203.865616ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-logansport-building-3, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.441859739Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-logansport-building-3, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.441849969Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-logansport-building-3, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.441755665Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.44169123Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-logansport-building-3, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.441638215Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.441582485Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-sar-investigation-db, env=qa" t=2024-05-29T13:44:13.441525506Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.441452157Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-risk-defense-platform-db, env=qa" t=2024-05-29T13:44:13.441329456Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-periodic-reviews-db, env=qa" t=2024-05-29T13:44:13.441171758Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.441389001Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-jolietsouth, daemonset=grafana-agent, env=sandbox, namespace=monitoring" t=2024-05-29T13:44:13.441371242Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-jolietsouth, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.441286417Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=206439 slug=relaypro t=2024-05-29T13:44:13.441250235Z level=debug msg="Saving alert states" count=7 max_state_save_concurrency=1 + logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=qa, role=ibotdr_fdb" t=2024-05-29T13:44:13.441146984Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:13.441123772Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The maximum lag (transaction log to storage server) has exceeded 4 hours on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment. This may indicate that mutations on the storage servers are far behind the mutations stored on the transaction logs. This can occur when a missing storage server rejoins, if its data hasn't been re-replicated yet.': error parsing template __alert_FDB - Transaction Log to Storage Server Lag (Exteme): template: __alert_FDB - Transaction Log to Storage Server Lag (Exteme):1: function \"role\" not defined" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-jolietsouth, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.441025624Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:13.441025959Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The maximum lag (transaction log to storage server) has exceeded 4 hours on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment. This may indicate that mutations on the storage servers are far behind the mutations stored on the transaction logs. 
This can occur when a missing storage server rejoins, if its data hasn't been re-replicated yet.': error parsing template __alert_FDB - Transaction Log to Storage Server Lag (Exteme): template: __alert_FDB - Transaction Log to Storage Server Lag (Exteme):1: function \"role\" not defined"
+logger=ngalert.state.manager.persist user=190917 slug=d1cx t=2024-05-29T13:44:13.440988637Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.544196ms
+logger=ngalert.state.manager.persist user=518752 slug=ishiitest t=2024-05-29T13:44:13.440896246Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=28.706137ms
+logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:13.440937748Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The maximum lag (transaction log to storage server) has exceeded 4 hours on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment. This may indicate that mutations on the storage servers are far behind the mutations stored on the transaction logs. This can occur when a missing storage server rejoins, if its data hasn't been re-replicated yet.': error parsing template __alert_FDB - Transaction Log to Storage Server Lag (Exteme): template: __alert_FDB - Transaction Log to Storage Server Lag (Exteme):1: function \"role\" not defined"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-jolietsouth, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.440856937Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=pro, role=ibot_fdb" t=2024-05-29T13:44:13.440863427Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.440751613Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=206439 slug=relaypro instance="cluster=mob, environment=pro, role=fabric_fdb" t=2024-05-29T13:44:13.440782876Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=206439 slug=relaypro t=2024-05-29T13:44:13.440740143Z level=error msg="Error in expanding template" error="failed to expand template '{{- $labels := .Labels -}}{{- $values := .Values -}}{{- $value := .Value -}}The maximum lag (transaction log to storage server) has exceeded 4 hours on the {{role}} FDB cluster in the {{cluster}}_{{environment}} environment. This may indicate that mutations on the storage servers are far behind the mutations stored on the transaction logs. This can occur when a missing storage server rejoins, if its data hasn't been re-replicated yet.': error parsing template __alert_FDB - Transaction Log to Storage Server Lag (Exteme): template: __alert_FDB - Transaction Log to Storage Server Lag (Exteme):1: function \"role\" not defined"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-idverse-enterprise-db, env=qa" t=2024-05-29T13:44:13.440700599Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-idverse-enterprise-db, env=qa" t=2024-05-29T13:44:13.440681832Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-frontend-log-db, env=qa" t=2024-05-29T13:44:13.440464565Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-joliet-north, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.440659775Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=830631 slug=api3 t=2024-05-29T13:44:13.440529103Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=32.99429ms
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-joliet-north, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.440562607Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.440341636Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.44005026Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.44002266Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-joliet-building-1-system-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.439991338Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.439958077Z caller=remote_instance_store.go:51 user=679029 slug=joveoprodaws msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-hendersonbuilding1, daemonset=kube-proxy, env=sandbox, namespace=kube-system" t=2024-05-29T13:44:13.439914869Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.439877406Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=679029 slug=joveoprodaws instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.439871056Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=679029 slug=joveoprodaws t=2024-05-29T13:44:13.439856326Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=679029 slug=joveoprodaws version=14991 fingerprint=97eb712e62429b25 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.439791085Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.439206155s EvaluationString:}]" duration=1.57566575s
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-hendersonbuilding1, daemonset=grafana-agent, env=sandbox, namespace=monitoring" t=2024-05-29T13:44:13.439814908Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-hendersonbuilding1, daemonset=collector-opentelemetry-collector-agent, env=sandbox, namespace=open-telemetry" t=2024-05-29T13:44:13.439735764Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-hendersonbuilding1, daemonset=collector-opentelemetry-collector-agent, env=sandbox, namespace=open-telemetry" t=2024-05-29T13:44:13.439721537Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.439697754Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.439617528Z caller=remote_instance_store.go:51 user=691855 slug=chainlake msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-hendersonbuilding1, daemonset=cilium, env=sandbox, namespace=kube-system" t=2024-05-29T13:44:13.439632534Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-henderson-building-2, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.439517356Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.439380557Z caller=remote_instance_store.go:51 user=705083 slug=mediakindsaas msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfet0wbn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.439260369Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=409436 slug=jeffryaldair1997 t=2024-05-29T13:44:13.439327022Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.510053ms
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-henderson-building-2, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.43931378Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=708873 slug=soultv t=2024-05-29T13:44:13.439083069Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfet0wbn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.439059047Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-henderson-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.439046701Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfet0wbn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.439005186Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kfet0wbn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.438831044Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-henderson-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.438831028Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=206107 slug=hydrolix t=2024-05-29T13:44:13.438758716Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.186199ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kf0zu56i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.438686243Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-henderson-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.438741611Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kf0zu56i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.438646552Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-everettbostonharbor, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.438649356Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=553581 slug=johnlewispreprod instance= t=2024-05-29T13:44:13.438522574Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kf0zu56i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.438485181Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.438520732Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-everettbostonharbor, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.438541514Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kf0zu56i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.43841796Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kf0imzms-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.438290859Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.438293136Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-everettbostonharbor, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.438343268Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-everettbostonharbor, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.438329336Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=371756 slug=asapp t=2024-05-29T13:44:13.438245681Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=47.139334ms
+logger=ngalert.state.manager.persist user=849222 slug=franv2dev t=2024-05-29T13:44:13.438218467Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=7.61657ms
+level=debug ts=2024-05-29T13:44:13.438236411Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.438153852Z caller=remote_instance_store.go:51 user=520342 slug=atrati msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-cheektowagabroadway, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.438185965Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.438076724Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.437970058Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.437978988Z caller=remote_instance_store.go:51 user=824501 slug=bendingspoons msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-cheektowagabroadway, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.437907715Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kewvn0ax-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.437844224Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kewvn0ax-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.437785653Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-cheektowagabroadway, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.437818406Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-cheektowagabroadway, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.437806569Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.437757038Z caller=remote_instance_store.go:51 user=316960 slug=mojamteam msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=112732 slug=gleamer instance= t=2024-05-29T13:44:13.437698226Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=316960 slug=mojamteam instance="datasource_uid=grafanacloud-logs, ref_id=A,C" t=2024-05-29T13:44:13.437671762Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-cheektowagabroadway, daemonset=collector-opentelemetry-collector-agent, env=office, namespace=open-telemetry" t=2024-05-29T13:44:13.437654598Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=316960 slug=mojamteam t=2024-05-29T13:44:13.437636218Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=112732 slug=gleamer version=1 fingerprint=e5742d64daa6b625 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.437477398Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.437157139s EvaluationString:}]" duration=14.063289ms
+logger=ngalert.state.manager.persist user=398032 slug=zegl t=2024-05-29T13:44:13.437579773Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.344602ms
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-cheektowagabroadway, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.437570471Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=407181 slug=novacentar t=2024-05-29T13:44:13.437407635Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.848959ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kewipx85-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.437366999Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kewipx85-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.437281028Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-bedford-park-p2-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.437277893Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=638476 slug=justinhs t=2024-05-29T13:44:13.437133925Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.732739ms
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-frontend-db, env=qa" t=2024-05-29T13:44:13.436919329Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kewipx85-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.437087666Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-keweig7k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.436970475Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=460952 slug=prdnextgen t=2024-05-29T13:44:13.436989821Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.203091ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-keweig7k-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.436931735Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-data-management-db, env=qa" t=2024-05-29T13:44:13.436406626Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.436954319Z caller=remote_instance_store.go:51 user=932433 slug=cmhdmxnp msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=915065 slug=cmpladpd instance="__name__=probe_http_status_code, config_version=1716382621032162816, instance=https://www.slauf.com/es/slaues, job=Browser to slauf.com, probe=Mumbai" t=2024-05-29T13:44:13.437051565Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=915065 slug=cmpladpd t=2024-05-29T13:44:13.436992493Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-keweig7k-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.436783713Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.436840824Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-keweig7k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.436656132Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-bedford-park-p1-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.436712209Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kewdgdbf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.43649737Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-bedford-park-p1-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.436439116Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kewdgdbf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.436347239Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-bedford-park-p1-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.43635798Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=lineage-bedford-park-p1-building-1, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.436342373Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=471861 slug=planetstaging t=2024-05-29T13:44:13.436238449Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.909645ms
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-creditsafe-db, env=qa" t=2024-05-29T13:44:13.436215748Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-creditsafe-db, env=qa" t=2024-05-29T13:44:13.436195635Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-keo7l4ov-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.436101586Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-keo7l4ov-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.435971995Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-comply-advantage-db, env=qa" t=2024-05-29T13:44:13.435975032Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=innovative-san-diego-building-2, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.435800984Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=250150 slug=bizagi version=57 fingerprint=0c11c13c12536601 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.435685358Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.435427447s EvaluationString:}]" duration=173.862417ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-keo7l4ov-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.435751043Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kemxx3b6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.435651472Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=114516 slug=heliumdashboard t=2024-05-29T13:44:13.435674554Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=37.467378ms
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=dualcool-capitalcity, daemonset=kube-proxy, env=sandbox, namespace=kube-system" t=2024-05-29T13:44:13.435662516Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.435471416Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.435249196Z caller=remote_instance_store.go:51 user=935198 slug=provable msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-qa-backend-db, env=qa" t=2024-05-29T13:44:13.435358967Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.435299763Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=dualcool-capitalcity, daemonset=grafana-agent, env=sandbox, namespace=monitoring" t=2024-05-29T13:44:13.435325473Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.435279942Z caller=remote_instance_store.go:51 user=802856 slug=altowud msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kekqjj0l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.435235717Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=242473 slug=roicsandbox t=2024-05-29T13:44:13.435258201Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kekqjj0l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.435179437Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=242473 slug=roicsandbox t=2024-05-29T13:44:13.435197097Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.435236728Z caller=remote_instance_store.go:51 user=134590 slug=humnpreprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=802856 slug=altowud instance="instance=node_exporter:9100, job=node_synergy" t=2024-05-29T13:44:13.435196161Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=242473 slug=roicsandbox version=1 fingerprint=fb960bc2f07d3791 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.435119792Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=yWTOXNS7z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.434536129s EvaluationString:}]" duration=3.594424ms
+logger=ngalert.scheduler user=802856 slug=altowud version=29 fingerprint=f9c432b6dde03921 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.435088Z level=debug msg="Alert rule evaluated" results="[{Instance:instance=node_exporter:9100, job=node_synergy State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:instance=node_exporter:9100, job=node_synergy Value:0xc02314aa48} B:{Var:B Labels:instance=node_exporter:9100, job=node_synergy Value:0xc02314aa70}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.434654389s EvaluationString:[ var='A' labels={instance=node_exporter:9100, job=node_synergy} value=45.04832755190109 ], [ var='B' labels={instance=node_exporter:9100, job=node_synergy} value=0 ]}]" duration=11.239757ms
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-veridas-db, env=uk" t=2024-05-29T13:44:13.435122807Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=dualcool-capitalcity, daemonset=grafana-agent, env=office, namespace=monitoring" t=2024-05-29T13:44:13.43510913Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kekqjj0l-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.434992655Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=134590 slug=humnpreprod t=2024-05-29T13:44:13.435113272Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager.persist user=166441 slug=tysonatp t=2024-05-29T13:44:13.435061693Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=166441 slug=tysonatp t=2024-05-29T13:44:13.435002124Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=698963 slug=lemonade instance="app=crash-detection-consumer-worker, pod=crash-detection-consumer-worker-cdfb8f6cf-wszz7" t=2024-05-29T13:44:13.435056202Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=698963 slug=lemonade t=2024-05-29T13:44:13.434914789Z level=debug msg="State manager processing evaluation results" resultCount=2
+level=debug ts=2024-05-29T13:44:13.43489621Z caller=remote_instance_store.go:51 user=542894 slug=aize msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.434631256Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=486972 slug=payretailers instance= t=2024-05-29T13:44:13.434739324Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.434696822Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=dualcool-capitalcity, daemonset=cilium, env=sandbox, namespace=kube-system" t=2024-05-29T13:44:13.43472736Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager.persist user=543654 slug=jobcloudprogrammaticprod t=2024-05-29T13:44:13.434611482Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=486972 slug=payretailers t=2024-05-29T13:44:13.434645251Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=486972 slug=payretailers version=4 fingerprint=522b3e264084e01c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.434542896Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Alert Condition:{Var:Alert Condition Labels: Value:0xc022212878} Rate:{Var:Rate Labels: Value:0xc0222128a8} Total MISSING_INFO Payins:{Var:Total MISSING_INFO Payins Labels: Value:0xc0222128b0} Total Payins:{Var:Total Payins Labels: Value:0xc0222128b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.43424355s EvaluationString:[ var='Alert Condition' labels={} value=0 ], [ var='Rate' labels={} value=2.6808433296982916 ], [ var='Total MISSING_INFO Payins' labels={} value=295 ], [ var='Total Payins' labels={} value=11004 ]}]" duration=228.735043ms
+logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.434575725Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+level=debug ts=2024-05-29T13:44:13.434498707Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.434463072Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=dualcool-capitalcity, daemonset=cilium, env=office, namespace=kube-system" t=2024-05-29T13:44:13.434569997Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.434423628Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-risk-defense-platform-db, env=uk" t=2024-05-29T13:44:13.434483111Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.scheduler user=543654 slug=jobcloudprogrammaticprod version=3 fingerprint=a8e157b12229504f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.434291626Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.433876045s EvaluationString:}]" duration=14.675836ms
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-periodic-reviews-db, env=uk" t=2024-05-29T13:44:13.434239714Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ke9e7s7d-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.434178266Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.434143433Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.434061912Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=crossnokaye-metropower, daemonset=cilium, env=production, namespace=kube-system" t=2024-05-29T13:44:13.434125871Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=cold-link-kingstown-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.434021229Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-instant-id-qa-db, env=uk" t=2024-05-29T13:44:13.434003728Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=cold-link-kingstown-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.433925128Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=772055 slug=josephevensen instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.433830825Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ke612xs1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.433828893Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=772055 slug=josephevensen t=2024-05-29T13:44:13.433790464Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=butterball-huntsville-building-1, daemonset=kube-proxy, env=production, namespace=kube-system" t=2024-05-29T13:44:13.433607332Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.433522974Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=512398 slug=brightdigital t=2024-05-29T13:44:13.433582156Z level=debug msg="Saving alert states" count=11 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ke612xs1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.43355944Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Stroomuitval Alarm" t=2024-05-29T13:44:13.433495055Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=butterball-huntsville-building-1, daemonset=grafana-agent, env=production, namespace=monitoring" t=2024-05-29T13:44:13.433519794Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=butterball-huntsville-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.433458231Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.433440099Z caller=remote_instance_store.go:51 user=672210 slug=noteb5 msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Stroom Alarm" t=2024-05-29T13:44:13.433453055Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=butterball-huntsville-building-1, daemonset=collector-opentelemetry-collector-agent, env=production, namespace=open-telemetry" t=2024-05-29T13:44:13.433447Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ke05dt19-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.433362838Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=166705 slug=crossnokaye t=2024-05-29T13:44:13.433185508Z level=debug msg="State manager processing evaluation results" resultCount=243
+logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.433307581Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=53.765235ms
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Slave Reset" t=2024-05-29T13:44:13.433336954Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Power On Reset" t=2024-05-29T13:44:13.433301354Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Interne Error" t=2024-05-29T13:44:13.433277553Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ke05dt19-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.433204906Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ke05dt19-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.433172316Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Externe Reset" t=2024-05-29T13:44:13.433201153Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Algemeen Alarm" t=2024-05-29T13:44:13.433164952Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=512398 slug=brightdigital instance="metric=Kast 19 - D2 - LK2 4 P2/15Algemeen Alarm" t=2024-05-29T13:44:13.433153752Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=512398 slug=brightdigital t=2024-05-29T13:44:13.433107052Z level=debug msg="State manager processing evaluation results" resultCount=11
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kdycyf7b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.433032175Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.43308096Z caller=remote_instance_store.go:51 user=792570 slug=metridev msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=843304 slug=ppcgroup t=2024-05-29T13:44:13.433040856Z level=debug msg="Skip rule evaluation because it is paused"
+level=debug ts=2024-05-29T13:44:13.433031316Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.433022159Z caller=remote_instance_store.go:51 user=70430 slug=dapperlabs msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=792570 slug=metridev version=6 fingerprint=e6a5d117420c7ff3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.432877056Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432496528s EvaluationString:}]" duration=16.34811ms
+level=debug ts=2024-05-29T13:44:13.432777248Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-frontend-database, env=uk" t=2024-05-29T13:44:13.432906855Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:13.432931951Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=713300 slug=tpcnanonprod t=2024-05-29T13:44:13.432580731Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:13.432923888Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:13.432740446Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:13.432729862Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.432715135Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:13.432708117Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-dow-jones-db, env=uk" t=2024-05-29T13:44:13.432625494Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=926cf3421d9630fb attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.432561864Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432393818s EvaluationString:}]" duration=218.767093ms
+logger=ngalert.state.manager user=698963 slug=lemonade instance="app=cmt-driver-reports-consumer-worker, pod=cmt-driver-reports-consumer-worker-89d94944b-n4tzv" t=2024-05-29T13:44:13.432577378Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.432552292Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.432605907Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.scheduler user=713300 slug=tpcnanonprod version=1 fingerprint=7a97123d23864b6d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.432434909Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432052297s EvaluationString:}]" duration=6.639903ms
+logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.432559522Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:13.432476065Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:13.432459854Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=432323 slug=lithic t=2024-05-29T13:44:13.432428405Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.scheduler user=698963 slug=lemonade version=4 fingerprint=0c24cef006884bf9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.432376298Z level=debug msg="Alert rule evaluated" results="[{Instance:app=cmt-driver-reports-consumer-worker, pod=cmt-driver-reports-consumer-worker-89d94944b-n4tzv State:Normal Error: Results:map[] Values:map[QUERY:{Var:QUERY Labels:app=cmt-driver-reports-consumer-worker, pod=cmt-driver-reports-consumer-worker-89d94944b-n4tzv Value:0xc0154ab1b8} THRESHOLD:{Var:THRESHOLD Labels:app=cmt-driver-reports-consumer-worker, pod=cmt-driver-reports-consumer-worker-89d94944b-n4tzv Value:0xc0154ab1e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431961398s EvaluationString:[ var='QUERY' labels={app=cmt-driver-reports-consumer-worker, pod=cmt-driver-reports-consumer-worker-89d94944b-n4tzv} value=0 ], [ var='THRESHOLD' labels={app=cmt-driver-reports-consumer-worker, pod=cmt-driver-reports-consumer-worker-89d94944b-n4tzv} value=0 ]}]" duration=60.921725ms
+logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:13.43242776Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.scheduler user=432323 slug=lithic version=6 fingerprint=696c9b7220f91a52 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.432328714Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.432002804s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=194.547656ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kduam4i0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.432333998Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=901230 slug=integromonitor t=2024-05-29T13:44:13.432275748Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:13.432264238Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=901230 slug=integromonitor instance= previous_handler=resultError t=2024-05-29T13:44:13.432257271Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+logger=ngalert.state.manager user=901230 slug=integromonitor instance= t=2024-05-29T13:44:13.432248516Z level=debug msg="Setting next state" handler=resultError
+level=debug ts=2024-05-29T13:44:13.432203453Z caller=remote_instance_store.go:51 user=523054 slug=vialtopartners msg="calling SaveAlertInstance"
+logger=ngalert.state.manager.persist user=523054 slug=vialtopartners t=2024-05-29T13:44:13.432160942Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=523054 slug=vialtopartners instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.432118553Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=622339 slug=lendbr instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.431984497Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.432110138Z level=debug msg="State manager processing evaluation results" resultCount=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kduam4i0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.432086075Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.431921564Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.432089706Z caller=remote_instance_store.go:51 user=622339 slug=lendbr msg="calling SaveAlertInstance"
+level=debug ts=2024-05-29T13:44:13.431885053Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=115097 slug=controlplane instance= t=2024-05-29T13:44:13.432057325Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=622339 slug=lendbr instance="datasource_uid=grafanacloud-prom, ref_id=A" previous_handler=resultNoData t=2024-05-29T13:44:13.431975753Z level=debug msg="Execution keep last state is Normal" handler=resultNormal
+logger=ngalert.state.manager user=115097 slug=controlplane t=2024-05-29T13:44:13.43203631Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=debug ts=2024-05-29T13:44:13.431896517Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=115097 slug=controlplane version=3 fingerprint=d0ebbafeae835281 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.431981694Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=33.87269ms
+logger=ngalert.state.manager user=622339 slug=lendbr instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.431963699Z level=debug msg="Setting next state" handler=resultNoData
+logger=ngalert.state.manager user=622339 slug=lendbr t=2024-05-29T13:44:13.431943333Z level=debug msg="State manager processing evaluation results" resultCount=1
+level=error ts=2024-05-29T13:44:13.431917432Z caller=remote_rule_evaluator.go:110 user=115097 slug=controlplane msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kdt32rfq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.431924943Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=622339 slug=lendbr version=42 fingerprint=7838ca47e2b5daf2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.431840426Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431488038s EvaluationString:}]" duration=16.13822ms
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-comply-advantage-db, env=uk" t=2024-05-29T13:44:13.431869177Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.scheduler user=537072 slug=devbitvavo version=3 fingerprint=851686e2172dea27 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.431688987Z level=debug msg="Alert rule evaluated" results="[{Instance:LoadBalancer=net/dev-exchange-api-ext-prim/d79bb24fcedd755e, TargetGroup=targetgroup/dev-exch-api-ext-prim-http/167c8fffe2dfbe7d State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:LoadBalancer=net/dev-exchange-api-ext-prim/d79bb24fcedd755e, TargetGroup=targetgroup/dev-exch-api-ext-prim-http/167c8fffe2dfbe7d Value:0xc05652bd08} C:{Var:C Labels:LoadBalancer=net/dev-exchange-api-ext-prim/d79bb24fcedd755e, TargetGroup=targetgroup/dev-exch-api-ext-prim-http/167c8fffe2dfbe7d Value:0xc05652bd50}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.431407937s EvaluationString:[ var='B' labels={LoadBalancer=net/dev-exchange-api-ext-prim/d79bb24fcedd755e, TargetGroup=targetgroup/dev-exch-api-ext-prim-http/167c8fffe2dfbe7d} value=0 ], [ var='C' labels={LoadBalancer=net/dev-exchange-api-ext-prim/d79bb24fcedd755e, TargetGroup=targetgroup/dev-exch-api-ext-prim-http/167c8fffe2dfbe7d} value=0 ]}]" duration=155.038139ms
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kdfqlhj8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.43159568Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-compliance-lens-db, env=uk" t=2024-05-29T13:44:13.431571652Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kdfqlhj8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.431475189Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kdfqlhj8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.431431248Z level=debug msg="Setting next state" handler=resultNormal
+level=debug ts=2024-05-29T13:44:13.431489043Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+logger=ngalert.scheduler user=453497 slug=n444151595 version=1 fingerprint=f1a51390f0b41ce2 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.43142341Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=3.781931ms
+level=error ts=2024-05-29T13:44:13.43139052Z caller=remote_rule_evaluator.go:110 user=453497 slug=n444151595 msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+level=debug ts=2024-05-29T13:44:13.431451303Z caller=remote_instance_store.go:51 user=288032 slug=dapperlabssre msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:13.431417142Z level=debug msg="Setting next state" handler=resultError
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kdfqlhj8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.431324237Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=236496 slug=improbable instance= t=2024-05-29T13:44:13.43135412Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.431397699Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kde8q6et-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.431228706Z level=debug msg="Keeping state" state=Normal
+level=debug ts=2024-05-29T13:44:13.431396661Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kde8q6et-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.430995484Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager.persist user=112732 slug=gleamer t=2024-05-29T13:44:13.431341663Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kd7qz8fq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.43062624Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.431294147Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.431278831Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcyw66kx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.430146945Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcyw66kx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.430113175Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=112732 slug=gleamer instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.431261303Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcyw66kx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.429987873Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcyw66kx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.429902913Z level=debug msg="Keeping state" state=Normal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcxu0brh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.429752541Z level=debug msg="Setting next state" handler=resultNormal
+logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase,
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcxu0brh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.429547199Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-backend-db-read-replica-1, env=uk" t=2024-05-29T13:44:13.431149798Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcvkcq0p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.429067024Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcvkcq0p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.428884152Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-prod-backend-db-read-replica-1, env=uk" t=2024-05-29T13:44:13.431127091Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcvkcq0p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.428758791Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcnf54q4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.428569359Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcnf54q4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.428418907Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcmo3wgs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.428338037Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcmo3wgs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.428177075Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcmo3wgs-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.428094934Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcmd8ja7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.427981363Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcmd8ja7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.42771504Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcaomfv3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.427646959Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kcaomfv3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.427600419Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kc7hf7ik-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.427128344Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kc7hf7ik-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.427015593Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kc13h6vl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.426902152Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kc13h6vl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.426861071Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kc13h6vl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.426790131Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kc13h6vl-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.42672738Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbys76ii-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.426180764Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-kbys76ii-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.426085723Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbys76ii-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.425973722Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.43089237Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbm5v21y-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.425819921Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbm5v21y-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.42571388Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.423419286Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.430571135Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-sar-investigation-db, env=pp" t=2024-05-29T13:44:13.430369925Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-qa/husqqaauthts-app, jvm_thread_daemon=true" t=2024-05-29T13:44:13.430330641Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-qa/husqqaauthts-app, jvm_thread_daemon=false" t=2024-05-29T13:44:13.43029659Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-sar-investigation-db, env=pp" t=2024-05-29T13:44:13.430358417Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.4302409Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-pre/husqprelivets-app, jvm_thread_daemon=false" 
t=2024-05-29T13:44:13.430060645Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-risk-defense-platform-db, env=pp" t=2024-05-29T13:44:13.430210739Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=733461 slug=lattice t=2024-05-29T13:44:13.430261923Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=9.255509ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-risk-defense-platform-db, env=pp" t=2024-05-29T13:44:13.430201526Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-pre/husqpreauthts-app, jvm_thread_daemon=false" t=2024-05-29T13:44:13.429943672Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-pre/husqpreauthquery-app, jvm_thread_daemon=true" t=2024-05-29T13:44:13.429927902Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-dev/husqdevquery-app, jvm_thread_daemon=true" t=2024-05-29T13:44:13.429881481Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-instant-id-qa-db, env=pp" t=2024-05-29T13:44:13.429918794Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-dev/husqdevlivets-app, jvm_thread_daemon=false" t=2024-05-29T13:44:13.429759768Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-dev/husqdevlivequery-app, jvm_thread_daemon=false" t=2024-05-29T13:44:13.429688657Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.429739221Z caller=remote_instance_store.go:51 user=796993 slug=marketops msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-scylla-sjc-2" t=2024-05-29T13:44:13.429628919Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.429720889Z caller=remote_alert_sender.go:94 user=884866 slug=cnonumerique host=cnonumerique-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.9.74.54:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=adlo1yzj8c9ogd alerts=1 + logger=ngalert.state.manager user=890273 slug=cmhusqnp instance="__name__=jvm_thread_count, cluster=gke-husq-n-europe-west4-1, job=husq-dev/husqdevauthquery-app, jvm_thread_daemon=false" t=2024-05-29T13:44:13.429509963Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-idverse-enterprise-db, env=pp" t=2024-05-29T13:44:13.429676176Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=890273 slug=cmhusqnp t=2024-05-29T13:44:13.429424721Z level=debug msg="State manager processing evaluation results" resultCount=30 + level=debug 
ts=2024-05-29T13:44:13.429660636Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-scylla-nyj-5" t=2024-05-29T13:44:13.429547028Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-scylla-nyj-3" t=2024-05-29T13:44:13.429504188Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.429572247Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-frontend-log-db, env=pp" t=2024-05-29T13:44:13.429503979Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-scylla-nyj-1" t=2024-05-29T13:44:13.429462867Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-sjc-2" t=2024-05-29T13:44:13.429439747Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-sjc-1" t=2024-05-29T13:44:13.429419527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-nyj-4" t=2024-05-29T13:44:13.429396196Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-nyj-4" t=2024-05-29T13:44:13.429387556Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-nyj-3" t=2024-05-29T13:44:13.429373316Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=fZkHHx7Vz, ref_id=A" t=2024-05-29T13:44:13.429285916Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=691102 slug=deluxeconfdev t=2024-05-29T13:44:13.429303096Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=6.775347ms + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-nyj-2" t=2024-05-29T13:44:13.429350116Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=fZkHHx7Vz, ref_id=A" t=2024-05-29T13:44:13.429274753Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-nyj-2" t=2024-05-29T13:44:13.429344016Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.42936139Z caller=remote_instance_store.go:51 user=335419 slug=tbauctions msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=335419 slug=tbauctions instance="datasource_uid=fZkHHx7Vz, ref_id=A" t=2024-05-29T13:44:13.429260624Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=796993 slug=marketops instance="host=prod-bidder-nyj-1" t=2024-05-29T13:44:13.429306845Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=335419 slug=tbauctions version=21 fingerprint=a8fa41699f59ec05 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.42917551Z 
level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fZkHHx7Vz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.428314856s EvaluationString:}]" duration=82.780399ms + level=debug ts=2024-05-29T13:44:13.42914417Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-frontend-database, env=pp" t=2024-05-29T13:44:13.428908141Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.428889215Z caller=remote_instance_store.go:51 user=612695 slug=ocipprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=612695 slug=ocipprod t=2024-05-29T13:44:13.428829302Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1 + logger=ngalert.state.manager user=612695 slug=ocipprod instance="__name__=vault_process_status, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node" t=2024-05-29T13:44:13.428768812Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=612695 slug=ocipprod instance="__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node" t=2024-05-29T13:44:13.428740226Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.428690344Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.428590075Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=612695 slug=ocipprod version=2 fingerprint=fe093c758df7cbb9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.428507574Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node Value:0xc0362d8d38} B:{Var:B Labels:__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node Value:0xc0362d8d70} C:{Var:C Labels:__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node Value:0xc0362d8dd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.427751364s EvaluationString:[ var='A' labels={__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node} value=1 ], [ var='B' labels={__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node} value=1 ], [ var='C' labels={__name__=vault_process_status, environment=ip-10-238-4-96.eu-central-1.compute.internal, instance=vault-1, job=node} value=0 ]} {Instance:__name__=vault_process_status, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=vault_process_status, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node Value:0xc0362d8f08} B:{Var:B Labels:__name__=vault_process_status, 
environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node Value:0xc0362d8e78} C:{Var:C Labels:__name__=vault_process_status, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node Value:0xc0362d8ee0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.427764987s EvaluationString:[ var='A' labels={__name__=vault_process_status, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node} value=1 ], [ var='B' labels={__name__=vault_process_status, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node} value=1 ], [ var='C' labels={__name__=vault_process_status, environment=ip-10-238-5-237.eu-central-1.compute.internal, instance=vault-2, job=node} value=0 ]} {Instance:__name__=vault_process_status, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=vault_process_status, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node Value:0xc0362d8fa8} B:{Var:B Labels:__name__=vault_process_status, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node Value:0xc0362d8ff0} C:{Var:C Labels:__name__=vault_process_status, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node Value:0xc0362d9038}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.427774109s EvaluationString:[ var='A' labels={__name__=vault_process_status, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node} value=1 ], [ var='B' labels={__name__=vault_process_status, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node} value=1 ], [ var='C' labels={__name__=vault_process_status, environment=ip-10-238-7-190.eu-central-1.compute.internal, instance=vault-3, job=node} value=0 ]}]" duration=11.132502ms + level=debug ts=2024-05-29T13:44:13.428434186Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.428399701Z caller=remote_alert_sender.go:94 user=350551 slug=loopme host=loopme-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.148.183.121:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=da0b4ace-045c-4a08-83d3-99a3ebec6bc8 alerts=1 + level=debug ts=2024-05-29T13:44:13.428429103Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.428412363Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.428192499Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=350551 slug=loopme t=2024-05-29T13:44:13.428278035Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.966097ms + level=debug ts=2024-05-29T13:44:13.428191679Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.427897981Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.427541347Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.833931ms + level=debug 
ts=2024-05-29T13:44:13.427681336Z caller=remote_instance_store.go:51 user=438855 slug=teckresources msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-billing-db, env=pp" t=2024-05-29T13:44:13.427644146Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.427644178Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=438855 slug=teckresources instance="datasource_uid=000000096, ref_id=B" t=2024-05-29T13:44:13.427613632Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=438855 slug=teckresources instance="datasource_uid=000000096, ref_id=B" t=2024-05-29T13:44:13.427604607Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=438855 slug=teckresources instance="datasource_uid=000000096, ref_id=B" t=2024-05-29T13:44:13.427591567Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.427376588Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=426229 slug=accelbyte t=2024-05-29T13:44:13.427346015Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=27.259802ms + logger=ngalert.state.manager user=162543 slug=rapharacing instance= t=2024-05-29T13:44:13.427247111Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=162543 slug=rapharacing t=2024-05-29T13:44:13.427213758Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-pp-backend-db, env=pp" t=2024-05-29T13:44:13.427274657Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.427138741Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.427140928Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=472647 slug=planet t=2024-05-29T13:44:13.426779828Z level=debug msg="Skip rule evaluation because it is paused" + level=info ts=2024-05-29T13:44:13.426702895Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=373 + level=debug ts=2024-05-29T13:44:13.426734865Z caller=remote_instance_store.go:51 user=667326 slug=lakovna msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.42672206Z caller=remote_instance_store.go:51 user=560336 slug=powernet msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-veridas-db, env=dev" t=2024-05-29T13:44:13.426713789Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.426642992Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.426602742Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=667326 slug=lakovna version=25 fingerprint=e6472ae3ebc6c644 attempt=1 now=2024-05-29T13:44:10Z 
t=2024-05-29T13:44:13.426472319Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[B:{Var:B Labels: Value:0xc077e23688} C:{Var:C Labels: Value:0xc077e23690}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.426064271s EvaluationString:[ var='B' labels={} value=0.11133337020874023 ], [ var='C' labels={} value=0 ]}]" duration=135.64514ms + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.426531469Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.426428356Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-risk-defense-platform-db, env=dev" t=2024-05-29T13:44:13.426342944Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=1175e3c32856ab2a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.426267409Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.426033416s EvaluationString:}]" duration=176.500289ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-portal-db, env=dev" t=2024-05-29T13:44:13.426205959Z level=debug msg="Setting next state" handler=resultNormal + level=info ts=2024-05-29T13:44:13.426114829Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=256 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-periodic-reviews-db, env=dev" t=2024-05-29T13:44:13.426090817Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-periodic-reviews-db, env=dev" t=2024-05-29T13:44:13.426081028Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.425949685Z caller=remote_instance_store.go:51 user=739130 slug=redphasetech msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-instant-id-qa-db, env=dev" t=2024-05-29T13:44:13.42595799Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-idverse-enterprise-db, env=dev" t=2024-05-29T13:44:13.425791401Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-frontend-db, env=dev" t=2024-05-29T13:44:13.42554174Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.425487509Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=10.85482ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbkglrkh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.425433767Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-dow-jones-db, env=dev" t=2024-05-29T13:44:13.425427068Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbkglrkh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.425369146Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=656459 slug=activeport t=2024-05-29T13:44:13.425342931Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=656459 slug=activeport instance="datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A" t=2024-05-29T13:44:13.425307504Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbkglrkh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.425222024Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-data-management-db, env=dev" t=2024-05-29T13:44:13.425287645Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.423429376Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=656459 slug=activeport version=87 fingerprint=054708b636129145 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.425185273Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=a17a51ac-52fa-4a8f-ae4d-66e273cfbbfc, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.424910956s EvaluationString:}]" duration=20.140182ms + level=debug ts=2024-05-29T13:44:13.425170534Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=635771 slug=sharedservices t=2024-05-29T13:44:13.425093012Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=18.233611ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-creditsafe-db, env=dev" t=2024-05-29T13:44:13.425149783Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbjhv4dw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.424987412Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=935198 slug=provable instance="__name__=up, group=provable_validator, instance=18.144.35.122:9000, job=snarkos_provable_validator, origin_prometheus=testnet-beta" t=2024-05-29T13:44:13.424887547Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.424889039Z caller=remote_instance_store.go:51 user=316418 slug=workmotion msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=538355 slug=flogic t=2024-05-29T13:44:13.424860585Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.877624ms + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-compliance-lens-db, env=dev" t=2024-05-29T13:44:13.424845353Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbh7ymwa-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.42479684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=935198 slug=provable instance="__name__=up, group=foundation_validator, instance=35.197.132.255:9000, job=snarkos_foundation_validator, origin_prometheus=testnet-beta" t=2024-05-29T13:44:13.42476961Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=935198 slug=provable instance="__name__=up, group=foundation_validator, instance=34.141.218.171:9000, job=snarkos_foundation_validator, origin_prometheus=testnet-beta" t=2024-05-29T13:44:13.42474856Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=935198 slug=provable instance="__name__=up, group=foundation_validator, instance=34.141.218.171:9000, job=snarkos_foundation_validator, origin_prometheus=testnet-beta" t=2024-05-29T13:44:13.424733669Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.424655047Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.424600348Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.424588907Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=lon-dev-backend-db, env=dev" t=2024-05-29T13:44:13.424565705Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.424511338Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:13.424437526Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=399183 slug=guidion instance="gui_utl__Source__c=queue-runner" t=2024-05-29T13:44:13.424462378Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=516446 slug=awarehqdev t=2024-05-29T13:44:13.424367922Z level=debug 
msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-veridas-db, env=eu" t=2024-05-29T13:44:13.424426376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=516446 slug=awarehqdev t=2024-05-29T13:44:13.424347221Z level=info msg="Detected stale state entry" cacheID="[[\"EndpointName\",\"v3-toxicspeech-esp\"],[\"Series\",\"query9b09afae6e45452aacfe47c994beb083\"],[\"__alert_rule_namespace_uid__\",\"D-8RyMx4z\"],[\"__alert_rule_uid__\",\"a45a9a18-783e-4fca-8012-9606acfa0fde\"],[\"alertname\",\"v3-toxicspeech-esp-5xx-errors\"],[\"grafana_folder\",\"bi\"],[\"group\",\"SageMaker5XXErrors\"],[\"route\",\"team=bi\"],[\"team\",\"bi\"]]" state=Normal reason= + logger=ngalert.state.manager user=399183 slug=guidion t=2024-05-29T13:44:13.424406168Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-veridas-db, env=eu" t=2024-05-29T13:44:13.424415752Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=XoeaeMUnk, ref_id=A" t=2024-05-29T13:44:13.424424865Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.42429086Z caller=remote_instance_store.go:51 user=716630 slug=coapdev msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-v4monitoring-db, env=eu" t=2024-05-29T13:44:13.424290446Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=471861 slug=planetstaging instance= t=2024-05-29T13:44:13.42427877Z level=debug msg="Setting next state" handler=resultError + level=debug ts=2024-05-29T13:44:13.424251381Z caller=remote_instance_store.go:51 user=174054 slug=netrading msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=471861 slug=planetstaging t=2024-05-29T13:44:13.424234716Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager.persist user=44181 slug=prospa t=2024-05-29T13:44:13.424253805Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=21.86973ms + logger=ngalert.state.manager user=782931 slug=watchomonitoring instance= t=2024-05-29T13:44:13.424137811Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.424198591Z caller=remote_instance_store.go:51 user=782931 slug=watchomonitoring msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=516446 slug=awarehqdev version=7 fingerprint=d6590fe8aed7a2b9 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.424139308Z level=debug msg="Alert rule evaluated" results="[{Instance:EndpointName=v3-toxicspeech-esp, Series=query173fb94da88148b1a954732206528fd8 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:EndpointName=v3-toxicspeech-esp, Series=query173fb94da88148b1a954732206528fd8 Value:0xc02cd68240} C:{Var:C Labels:EndpointName=v3-toxicspeech-esp, Series=query173fb94da88148b1a954732206528fd8 Value:0xc02cd68220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.422520687s EvaluationString:[ var='B' labels={EndpointName=v3-toxicspeech-esp, Series=query173fb94da88148b1a954732206528fd8} value=0 ], [ var='C' labels={EndpointName=v3-toxicspeech-esp, Series=query173fb94da88148b1a954732206528fd8} value=0 ]}]" 
duration=55.402152ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbcoqvpi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.424071883Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-sar-investigation-db, env=eu" t=2024-05-29T13:44:13.424017451Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.423893704Z caller=remote_rule_evaluator.go:193 user=802856 slug=altowud msg="sending loaded metrics" count=0 reason="expression contains hysteresis command" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbcoqvpi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.423878601Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=460952 slug=prdnextgen instance= t=2024-05-29T13:44:13.423766123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=460952 slug=prdnextgen instance= t=2024-05-29T13:44:13.423747133Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=4947 slug=mediamath version=1 fingerprint=50c86e3b3205d6ed attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.423666695Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=000000020, ref_id=A,B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.423429333s EvaluationString:}]" duration=37.320957ms + level=debug ts=2024-05-29T13:44:13.423579033Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=460952 slug=prdnextgen t=2024-05-29T13:44:13.423588789Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kbbantka-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.423528127Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-periodic-reviews-db, env=eu" t=2024-05-29T13:44:13.423487769Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=206107 slug=hydrolix version=2 fingerprint=abf8197f82db3f6c attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.423407829Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[C0:{Var:C Labels: Value:0xc01722a3a8} C1:{Var:C Labels: 
Value:0xc01722a3b8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.423083026s EvaluationString:[ var='C0' metric='avg_raw_bytes' labels={} value=2.02058155e+08 ], [ var='C1' metric='avg_raw_bytes' labels={} value=1.65840358e+08 ]}]" duration=498.660754ms
+ level=debug ts=2024-05-29T13:44:13.423436691Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.423280354Z caller=remote_instance_store.go:51 user=672210 slug=noteb5 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-instant-id-qa-db, env=eu" t=2024-05-29T13:44:13.423350074Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.423271994Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-idverse-enterprise-db, env=eu" t=2024-05-29T13:44:13.423225488Z level=debug msg="Keeping state" state=Normal
+ level=info ts=2024-05-29T13:44:13.423058686Z caller=remote_alert_sender.go:94 user=680839 slug=zerotoone360 host=zerotoone360-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.8.162.157:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=b0f4bd40-2360-4679-a34f-b9c7e7d2e53f alerts=1
+ level=debug ts=2024-05-29T13:44:13.423004328Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.422941214Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.422929664Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.scheduler user=245291 slug=pismo version=54 fingerprint=f6256a89e5c26c71 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.422838104Z level=debug msg="Alert rule evaluated" results="[{Instance: State:NoData Error: Results:map[] Values:map[B0:{Var:B Labels: Value:}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.422543882s EvaluationString:[ var='B0' metric='NoData' labels={} value=null ]}]" duration=38.865796ms
+ level=debug ts=2024-05-29T13:44:13.422833443Z caller=remote_instance_store.go:51 user=269887 slug=blackrockdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-frontend-db-read-replica-1, env=eu" t=2024-05-29T13:44:13.422851818Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.422573821Z caller=remote_instance_store.go:51 user=691102 slug=deluxeconfdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.42251365Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=691102 slug=deluxeconfdev instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.4225051Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager.persist user=557231 slug=lnrsusinsuranceprod t=2024-05-29T13:44:13.422594871Z level=debug msg="Saving alert states" count=43 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-dow-jones-db, env=eu" t=2024-05-29T13:44:13.422563689Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-dow-jones-db, env=eu" t=2024-05-29T13:44:13.422553921Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=691102 slug=deluxeconfdev version=1 fingerprint=9e50380176d26934 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.422407968Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.42223401s EvaluationString:}]" duration=7.738623ms
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-data-management-db, env=eu" t=2024-05-29T13:44:13.422427947Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-wv-service" t=2024-05-29T13:44:13.42238897Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-wi-service" t=2024-05-29T13:44:13.422355968Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-creditsafe-db, env=eu" t=2024-05-29T13:44:13.422294497Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-wa-service" t=2024-05-29T13:44:13.422268372Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-vt-service" t=2024-05-29T13:44:13.42211639Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.421920614Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-tx-service" t=2024-05-29T13:44:13.421947682Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-sd-service" t=2024-05-29T13:44:13.421887298Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-sc-service" t=2024-05-29T13:44:13.421864868Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-ri-service" t=2024-05-29T13:44:13.421804318Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-pa-service" t=2024-05-29T13:44:13.421759208Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.42174994Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-pa-service" t=2024-05-29T13:44:13.421749476Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=417739 slug=ciscoiotdev t=2024-05-29T13:44:13.421550642Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.412477ms
+ level=debug ts=2024-05-29T13:44:13.421585289Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.421479667Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=373
+ level=debug ts=2024-05-29T13:44:13.42153478Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-nh-service" t=2024-05-29T13:44:13.421553135Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-mt-service" t=2024-05-29T13:44:13.421425446Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-backend-db-read-replica-1, env=eu" t=2024-05-29T13:44:13.421343227Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=683458 slug=whipped instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.421211695Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=391538 slug=risknarrative instance="dbinstance_identifier=fran-prod-backend-db-read-replica-1, env=eu" t=2024-05-29T13:44:13.421328261Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-me-service" t=2024-05-29T13:44:13.421145475Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.421147622Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=350551 slug=loopme t=2024-05-29T13:44:13.421057661Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.421055865Z caller=remote_instance_store.go:51 user=733461 slug=lattice msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.421069277Z caller=remote_alert_sender.go:94 user=391577 slug=daghouse host=daghouse-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.33.88:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=h0vSwW24k alerts=1
+ logger=ngalert.state.manager.persist user=391577 slug=daghouse t=2024-05-29T13:44:13.420982727Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=25.93465ms
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-md-service" t=2024-05-29T13:44:13.42082707Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-ma-service" t=2024-05-29T13:44:13.420804888Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-la-service" t=2024-05-29T13:44:13.420746664Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-ky-service" t=2024-05-29T13:44:13.420702114Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:13.420642971Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=884866 slug=cnonumerique instance="datasource_uid=fdhk917z41xj4a, ref_id=A" t=2024-05-29T13:44:13.420625521Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=884866 slug=cnonumerique t=2024-05-29T13:44:13.42057876Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=884866 slug=cnonumerique version=115 fingerprint=8c2c1474942359c6 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.420506299Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdhk917z41xj4a, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.420280125s EvaluationString:}]" duration=8.539068ms
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-ia-service" t=2024-05-29T13:44:13.420510243Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-ia-service" t=2024-05-29T13:44:13.420416383Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.420399726Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.420467246Z caller=remote_instance_store.go:51 user=190917 slug=d1cx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-de-service" t=2024-05-29T13:44:13.420310293Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-de-service" t=2024-05-29T13:44:13.420240714Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-dc-service" t=2024-05-29T13:44:13.42021305Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=190917 slug=d1cx version=1 fingerprint=728f9ee936a34415 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.420286572Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.420040779s EvaluationString:}]" duration=41.828841ms
+ logger=ngalert.state.manager.persist user=63636 slug=streamelements t=2024-05-29T13:44:13.420294041Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=info ts=2024-05-29T13:44:13.420210505Z caller=grafana.go:247 user=438185 slug=nodeinfra msg="rules manager rule groups request" path=/prometheus/api/v1/alerts grafana_org_id=1 query= groups=0 alerts=256
+ level=debug ts=2024-05-29T13:44:13.420153304Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=63636 slug=streamelements version=5 fingerprint=56cb3d6556bec5d5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.420159385Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.419882344s EvaluationString:}]" duration=229.685232ms
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-co-service" t=2024-05-29T13:44:13.420163546Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-az-service" t=2024-05-29T13:44:13.420132249Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-ar-service" t=2024-05-29T13:44:13.420091065Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod instance="container=ins-mvr-gateway-al-service" t=2024-05-29T13:44:13.420053781Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.419924637Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=171235 slug=circleslabs t=2024-05-29T13:44:13.419809081Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=40.48087ms
+ level=debug ts=2024-05-29T13:44:13.419717464Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=557231 slug=lnrsusinsuranceprod t=2024-05-29T13:44:13.419724952Z level=debug msg="State manager processing evaluation results" resultCount=43
+ level=debug ts=2024-05-29T13:44:13.419660256Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=407181 slug=novacentar t=2024-05-29T13:44:13.419553181Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=407181 slug=novacentar instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.419532688Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=407181 slug=novacentar instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.419523003Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.419293075Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.419206738Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=453497 slug=n444151595 t=2024-05-29T13:44:13.419438188Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=396586 slug=opengov t=2024-05-29T13:44:13.419319011Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=453497 slug=n444151595 instance= t=2024-05-29T13:44:13.419374176Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError
+ logger=ngalert.scheduler user=453497 slug=n444151595 version=30 fingerprint=3be28e900ce76d42 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.419259124Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.283203ms
+ level=debug ts=2024-05-29T13:44:13.419215623Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ level=error ts=2024-05-29T13:44:13.419162813Z caller=remote_rule_evaluator.go:110 user=453497 slug=n444151595 msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ level=debug ts=2024-05-29T13:44:13.419133137Z caller=remote_instance_store.go:51 user=798928 slug=basepowercompany msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=798928 slug=basepowercompany instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.419073855Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=798928 slug=basepowercompany instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.419064315Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=798928 slug=basepowercompany instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.418994153Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.418836982Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=409436 slug=jeffryaldair1997 t=2024-05-29T13:44:13.41870945Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.83:9216, job=database-exporter-db7, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.418436281Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.82:9216, job=database-exporter-db7, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.418412765Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.81:9216, job=database-exporter-db7, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.418344379Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.417974727Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.53:9216, job=database-exporter-db4, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417954938Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.53:9216, job=database-exporter-db4, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417941954Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.46:9216, job=database-exporter-db3, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417849236Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.46:9216, job=database-exporter-db3, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417791148Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.45:9216, job=database-exporter-db3, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417730557Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.417640672Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.35:9216, job=database-exporter-db2, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417693553Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.417652719Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.33:9216, job=database-exporter-db2, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417592838Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.32:9216, job=database-exporter-db2, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417529592Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.41740631Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.582886ms
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.21:9216, job=database-exporter-db1, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417374581Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.21:9216, job=database-exporter-db1, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.41735833Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.417292802Z caller=remote_instance_store.go:51 user=536272 slug=kovalikadam00 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.205:9216, job=database-exporter-open, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417278737Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=174016 slug=journalstaging t=2024-05-29T13:44:13.417246606Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=451427 slug=rocketchat instance="__name__=up, cluster=us-east-1, instance=10.11.20.204:9216, job=database-exporter-open, monitor=us-east-1, prometheus=monitoring/prometheus" t=2024-05-29T13:44:13.417187585Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.417135701Z caller=remote_instance_store.go:51 user=639839 slug=silae msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=451427 slug=rocketchat t=2024-05-29T13:44:13.416999565Z level=debug msg="State manager processing evaluation results" resultCount=21
+ level=debug ts=2024-05-29T13:44:13.416473229Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=274199 slug=telemetriahgm instance= t=2024-05-29T13:44:13.416424632Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=27998 slug=korob instance="datasource_uid=grafanacloud-korob, ref_id=A" t=2024-05-29T13:44:13.416029807Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.415731387Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.41543232Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.41491651Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.414667674Z caller=remote_instance_store.go:51 user=407477 slug=inventa msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.414627824Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.414516553Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.414404211Z caller=remote_instance_store.go:51 user=201644 slug=thoughtspot msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.414271171Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.414346594Z caller=remote_instance_store.go:51 user=527202 slug=lnrsusinsurancedev msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.414287887Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ level=debug component=discovery ts=2024-05-29T13:44:13.414293974Z caller=hgapi.go:100 msg="requesting a list of instances from the hg api" url="http://hosted-grafana-api.hosted-grafana.svc.cluster.local/instances?status=active"
+ level=debug ts=2024-05-29T13:44:13.414228531Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.414220873Z caller=ruler.go:426 msg="syncing rules" reason=periodic
+ level=debug ts=2024-05-29T13:44:13.414206341Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.414089767Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.413792897Z caller=remote_instance_store.go:51 user=521139 slug=adevintamobiledepro msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.413821813Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.41373024Z caller=remote_instance_store.go:51 user=414522 slug=scaleops msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.413586811Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=245291 slug=pismo version=38 fingerprint=37efa3f7228bbd08 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.413495379Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.413223815s EvaluationString:}]" duration=566.186751ms
+ logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=LwbzRMnVk, ref_id=A" t=2024-05-29T13:44:13.413490126Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=430961 slug=solifi t=2024-05-29T13:44:13.413478742Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=430961 slug=solifi version=7 fingerprint=e0f18b272de0c97a attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.413427228Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=LwbzRMnVk, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.413156306s EvaluationString:}]" duration=30.945541ms
+ level=debug ts=2024-05-29T13:44:13.413200653Z caller=remote_instance_store.go:51 user=174927 slug=syndic82690 msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.413180455Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.41255515Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.412468165Z caller=remote_instance_store.go:51 user=172772 slug=ppbtradingtribe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.412452893Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=172772 slug=ppbtradingtribe t=2024-05-29T13:44:13.412433378Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager.persist user=765874 slug=rhwstaging t=2024-05-29T13:44:13.412187364Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.27086ms
+ logger=ngalert.state.manager.persist user=731546 slug=liderbci t=2024-05-29T13:44:13.41210101Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.39031ms
+ logger=ngalert.state.manager user=518752 slug=ishiitest instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.412106223Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=518752 slug=ishiitest instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.412090344Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.411976108Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=866972 slug=mitsubishi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.411846578Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=866972 slug=mitsubishi instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.411836207Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=866972 slug=mitsubishi t=2024-05-29T13:44:13.411811117Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.411815446Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance="datasource_uid=1x3mYGa7z, ref_id=A" t=2024-05-29T13:44:13.411492725Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.411450545Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.411446065Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=307381 slug=kambitaskforce version=14 fingerprint=4ff3b13d0cd19200 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.411326731Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=1x3mYGa7z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.410882256s EvaluationString:}]" duration=24.839788ms
+ level=debug ts=2024-05-29T13:44:13.411149053Z caller=remote_instance_store.go:51 user=738479 slug=gohero msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.41118509Z caller=remote_instance_store.go:51 user=670772 slug=digittal msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=670772 slug=digittal t=2024-05-29T13:44:13.411135589Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=670772 slug=digittal instance="__name__=iberojet_get_prismic_health, __replica__=prometheus-prod-eu-west-1-prometheus-prometheus-0, app_kubernetes_io_name=prometheus-statsd-exporter, env=prod, kubernetes_namespace=infra-prod, metric_enabled=true, tag_Env=prod" t=2024-05-29T13:44:13.411121249Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.411109779Z caller=remote_instance_store.go:51 user=708531 slug=dooshimagbamwuan msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=708531 slug=dooshimagbamwuan t=2024-05-29T13:44:13.411039588Z level=debug msg="Saving alert states" count=7 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.410949632Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.411005948Z level=warn msg="Failed to take an image" dashboard=ff7e00ce-0697-47e9-bf68-35a43d8a11a6 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ level=debug ts=2024-05-29T13:44:13.410965882Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=340882 slug=hopstack t=2024-05-29T13:44:13.410876082Z level=debug msg="Saving alert states done" count=22 max_state_save_concurrency=1 duration=401.311206ms
+ level=debug ts=2024-05-29T13:44:13.410769041Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.410626122Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.410617934Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.410476156Z caller=remote_instance_store.go:51 user=705083 slug=mediakindsaas msg="calling SaveAlertInstance"
+ level=info ts=2024-05-29T13:44:13.41019486Z caller=remote_image_capturer.go:61 user=32396 slug=thingwizard rule_org_id=1 rule_uid=JobCAvunk dashboard=LTwUvJ0iz panel=23 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.410153818Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager.persist user=417739 slug=ciscoiotdev t=2024-05-29T13:44:13.41013239Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=417739 slug=ciscoiotdev instance="datasource_uid=grafanacloud-usage, ref_id=A" t=2024-05-29T13:44:13.410103597Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=538355 slug=flogic instance="account_id=641264638977, dimension_DBInstanceIdentifier=daiwapr-prod, http_scheme=http, instance=localhost:5000, job=yace-exporter, name=arn:aws:rds:ap-northeast-1:641264638977:db:daiwapr-prod, net_host_port=5000, region=ap-northeast-1, service_instance_id=localhost:5000, service_name=yace-exporter" t=2024-05-29T13:44:13.40996293Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=538355 slug=flogic t=2024-05-29T13:44:13.409896915Z level=debug msg="State manager processing evaluation results" resultCount=1
+ level=debug ts=2024-05-29T13:44:13.409838818Z caller=remote_instance_store.go:51 user=618621 slug=sendamatic msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=156035 slug=correlatedlabs t=2024-05-29T13:44:13.409749917Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter" t=2024-05-29T13:44:13.409758036Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter" t=2024-05-29T13:44:13.409726926Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter" t=2024-05-29T13:44:13.409695975Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.409683695Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter" t=2024-05-29T13:44:13.409661914Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter" t=2024-05-29T13:44:13.409613393Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=156035 slug=correlatedlabs version=5 fingerprint=f55f5bbfdb15f5a3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.409619259Z level=error msg="Failed to evaluate rule" error="failed to build query 'B': data source not found" duration=4.615991ms
+ level=debug ts=2024-05-29T13:44:13.409549084Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=error ts=2024-05-29T13:44:13.409579548Z caller=remote_rule_evaluator.go:110 user=156035 slug=correlatedlabs msg="remote evaluate failed" code=Code(422) err="failed to build query 'B': data source not found"
+ level=debug ts=2024-05-29T13:44:13.409568244Z caller=remote_image_capturer.go:54 user=32396 slug=thingwizard rule_org_id=1 rule_uid=JobCAvunk dashboard=LTwUvJ0iz panel=23 msg="rendering alert image with grafana"
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter" t=2024-05-29T13:44:13.409570513Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter" t=2024-05-29T13:44:13.409535592Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter" t=2024-05-29T13:44:13.409494371Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=32396 slug=thingwizard instance= t=2024-05-29T13:44:13.409453168Z level=debug msg="Execution error state is Alerting" handler=resultAlerting previous_handler=resultError
+ logger=ngalert.state.manager user=618621 slug=sendamatic instance="__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter" t=2024-05-29T13:44:13.409476291Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=618621 slug=sendamatic version=76 fingerprint=bfbc69e0aa4a3365 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.409238335Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter Value:0xc0546b6370} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter Value:0xc0546b63d0} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter Value:0xc0546b6430}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.4086987s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter} value=255 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter} value=255 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-1, instance=emitter-eu-f-1, job=integrations/node_exporter} value=0 ]} {Instance:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter Value:0xc0546b6540} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter Value:0xc0546b6590} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter Value:0xc0546b64e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.408726652s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter} value=255 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter} value=255 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-2, instance=emitter-eu-f-2, job=integrations/node_exporter} value=0 ]} {Instance:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter Value:0xc0546b6700} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter Value:0xc0546b6640} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter Value:0xc0546b66a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.408731952s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter} value=255 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter} value=255 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-3, instance=emitter-eu-f-3, job=integrations/node_exporter} value=0 ]} {Instance:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-4, instance=emitter-eu-f-4, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-4, instance=emitter-eu-f-4, job=integrations/node_exporter Value:0xc0546b67b0} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-4, instance=emitter-eu-f-4, job=integrations/node_exporter Value:0xc0546b6800} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-4, instance=emitter-eu-f-4, job=integrations/node_exporter Value:0xc0546b6860}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.408736803s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-4, instance=emitter-eu-f-4, job=integrations/node_exporter} value=255 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-4, instance=emitter-eu-f-4, job=integrations/node_exporter} value=255 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-4, instance=emitter-eu-f-4, job=integrations/node_exporter} value=0 ]} {Instance:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter Value:0xc0546b6a00} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter Value:0xc0546b6920} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter Value:0xc0546b69a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.408740863s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter} value=255 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter} value=255 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-f-5, instance=emitter-eu-f-5, job=integrations/node_exporter} value=0 ]} {Instance:__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter Value:0xc0546b6ab0} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter Value:0xc0546b6b10} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter Value:0xc0546b6b60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.408747173s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter} value=255 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter} value=255 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=emitter-eu-h-1, instance=emitter-eu-h-1, job=integrations/node_exporter} value=0 ]} {Instance:__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter Value:0xc0546b6c00} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter Value:0xc0546b6c38} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter Value:0xc0546b6ca8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.408755854s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter} value=255 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter} value=255 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=mta-uk-l-1, instance=mta-uk-l-1, job=integrations/node_exporter} value=0 ]} {Instance:__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter Value:0xc0546b6db0} B:{Var:B Labels:__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter Value:0xc0546b6e10} C:{Var:C Labels:__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter Value:0xc0546b6d60}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.408763094s EvaluationString:[ var='A' labels={__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter} value=97 ], [ var='B' labels={__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter} value=97 ], [ var='C' labels={__name__=senderscore_reputation, agent_hostname=mta-us-v-1, instance=mta-us-v-1, job=integrations/node_exporter} value=0 ]}]" duration=9.163272ms
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.409341698Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.409333618Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.409300541Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.409252196Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=680839 slug=zerotoone360 t=2024-05-29T13:44:13.409092219Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ level=debug ts=2024-05-29T13:44:13.408558775Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=565508 slug=diagnosticrapid t=2024-05-29T13:44:13.408454921Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.205911ms
+ level=debug ts=2024-05-29T13:44:13.408473153Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.408405227Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.408399907Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.408393117Z level=debug msg="Setting next state" handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.408329078Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.408329484Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=824501 slug=bendingspoons t=2024-05-29T13:44:13.408243976Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=20.245413ms
+ level=debug ts=2024-05-29T13:44:13.408057102Z caller=remote_instance_store.go:51 user=729654 slug=bmsmonitoring msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.407699086Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=830631 slug=api3 t=2024-05-29T13:44:13.407531512Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.407515493Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=830631 slug=api3 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.407510213Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=127813 slug=clearsale version=7 fingerprint=754b8bdfad5c6e1e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.407439532Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.407159736s EvaluationString:}]" duration=203.061065ms
+ level=debug ts=2024-05-29T13:44:13.407421162Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.scheduler user=872772 slug=exatechstage t=2024-05-29T13:44:13.407447341Z level=debug msg="Skip rule evaluation because it is paused"
+ logger=ngalert.scheduler user=830631 slug=api3 version=30 fingerprint=47d2e9b67dae0d06 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.40739645Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.407046804s EvaluationString:}]" duration=65.440499ms
+ logger=ngalert.scheduler user=680839 slug=zerotoone360 version=2 fingerprint=7ccfdd20a32d4931 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.407266026Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=4.127402ms
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.407295194Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager.persist user=129076 slug=marginalunit t=2024-05-29T13:44:13.407069196Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=129076 slug=marginalunit instance= t=2024-05-29T13:44:13.407057061Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.407021981Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ level=error ts=2024-05-29T13:44:13.406675679Z caller=remote_rule_evaluator.go:110 user=680839 slug=zerotoone360 msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found"
+ level=debug ts=2024-05-29T13:44:13.40674716Z caller=remote_instance_store.go:51 user=716630 slug=coapdev msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=716630 slug=coapdev t=2024-05-29T13:44:13.406687459Z level=debug msg="Saving alert states" count=3 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=635771 slug=sharedservices instance="cluster=eng-eks-stg" t=2024-05-29T13:44:13.406689379Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=635771 slug=sharedservices t=2024-05-29T13:44:13.406632518Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10016, vsecc=vsecc_2" t=2024-05-29T13:44:13.406665131Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10016, vsecc=vsecc_2" t=2024-05-29T13:44:13.406654631Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716630 slug=coapdev instance="__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok" t=2024-05-29T13:44:13.406602928Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10016, vsecc=vsecc_1" t=2024-05-29T13:44:13.406583331Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=716630 slug=coapdev instance="__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok" t=2024-05-29T13:44:13.406525487Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=716630 slug=coapdev t=2024-05-29T13:44:13.406464856Z level=debug msg="State manager processing evaluation results" resultCount=3
+ logger=ngalert.state.manager user=386241 slug=bodygram instance="datasource_uid=___2pZIVz, ref_id=Detections" t=2024-05-29T13:44:13.406516085Z level=debug msg="Keeping state" state=Normal
+ Error parsing panelUID for alert annotationruleID5302dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=386241 slug=bodygram version=12 fingerprint=c49595074e14ad44 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.406400861Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=___2pZIVz, ref_id=Detections State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.406047312s EvaluationString:}]" duration=44.211316ms
+ logger=ngalert.scheduler user=716630 slug=coapdev version=1 fingerprint=9fb667bbdedc281e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.406245192Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc0023709e0} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370a38} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370a98}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.405593915s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=app-dev, instance=app-dev:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=0 ]} {Instance:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370b40} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370b98} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370bf0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.405617495s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=cap-prd-app, instance=cap-prd-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=0 ]} {Instance:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370d88} B:{Var:B Labels:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370cb8} C:{Var:C Labels:__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok Value:0xc002370d20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.405629406s EvaluationString:[ var='A' labels={__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=1 ], [ var='B' labels={__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=1 ], [ var='C' labels={__name__=windows_service_status, agent_hostname=cap-qa-app, instance=cap-qa-app:12345, job=integrations/windows_exporter, name=epxtomcat, status=ok} value=0 ]}]" duration=8.548445ms
+ level=debug ts=2024-05-29T13:44:13.406326689Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.406369183Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10015, vsecc=vsecc_1" t=2024-05-29T13:44:13.40634823Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.406256029Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.40624656Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10014, vsecc=vsecc_3" t=2024-05-29T13:44:13.40626883Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10014, vsecc=vsecc_3" t=2024-05-29T13:44:13.40625723Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=23997 slug=wheniwork t=2024-05-29T13:44:13.406124785Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10014, vsecc=vsecc_1" t=2024-05-29T13:44:13.40611753Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10013, vsecc=vsecc_3" t=2024-05-29T13:44:13.40605273Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.405922909Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.405934896Z caller=remote_instance_store.go:51 user=916144 slug=cmjjilpd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10013, vsecc=vsecc_1" t=2024-05-29T13:44:13.405883029Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=652809 slug=glassnode instance="source_env=mi-shared" t=2024-05-29T13:44:13.405779551Z level=warn msg="Failed to take an image" dashboard=e5e7a978-1989-40bf-9869-f307fae5f90b panel=9 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10004, vsecc=vsecc_3" t=2024-05-29T13:44:13.405799429Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10004, vsecc=vsecc_2" t=2024-05-29T13:44:13.405717129Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.405574293Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10004, vsecc=vsecc_1" t=2024-05-29T13:44:13.405567128Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10001, vsecc=vsecc_4" t=2024-05-29T13:44:13.405413228Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.405349732Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData
+ level=debug ts=2024-05-29T13:44:13.405264605Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=739130 slug=redphasetech instance="unit_serial=10001, vsecc=vsecc_3" t=2024-05-29T13:44:13.405333728Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.405318111Z level=warn msg="Failed to take an image" dashboard=ff7e00ce-0697-47e9-bf68-35a43d8a11a6 panel=1 error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=d53c4c5e1efb9659 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.405129068Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.404848102s EvaluationString:}]" duration=19.372219ms
+ level=info ts=2024-05-29T13:44:13.405273981Z caller=remote_image_capturer.go:61 user=708531 slug=dooshimagbamwuan rule_org_id=1 rule_uid=ef96ce42-fe8b-4117-bcae-07126689c62c dashboard=ff7e00ce-0697-47e9-bf68-35a43d8a11a6 panel=1 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable"
+ logger=ngalert.state.manager user=447897 slug=mysten instance="datasource_uid=CU1v-k2Vk, ref_id=A" t=2024-05-29T13:44:13.405293406Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.405264989Z caller=remote_instance_store.go:51 user=536272 slug=kovalikadam00 msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=536272 slug=kovalikadam00 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.405192188Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.405122717Z caller=remote_rule_evaluator.go:193 user=656459 slug=activeport msg="sending loaded metrics" count=0 reason="expression contains hysteresis command"
+ logger=ngalert.state.manager.persist user=34615 slug=nulogycn1 t=2024-05-29T13:44:13.405214439Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=536272 slug=kovalikadam00 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.405183388Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=34615 slug=nulogycn1 instance= t=2024-05-29T13:44:13.405202705Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=536272 slug=kovalikadam00 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.405173388Z level=debug msg="Setting next state" handler=resultNoData
+ logger=ngalert.state.manager user=34615 slug=nulogycn1 t=2024-05-29T13:44:13.405152357Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=536272 slug=kovalikadam00 instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.405143487Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData
+ logger=ngalert.state.manager user=739130 slug=redphasetech t=2024-05-29T13:44:13.405000427Z level=debug msg="State manager processing evaluation results" resultCount=24
+ level=debug ts=2024-05-29T13:44:13.404981096Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234
msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=739130 slug=redphasetech version=11 fingerprint=ef7ef2716c7c39d0 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.404748926Z level=debug msg="Alert rule evaluated" results="[{Instance:unit_serial=10001, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10001, vsecc=vsecc_1 Value:0xc0579c2480} C:{Var:C Labels:unit_serial=10001, vsecc=vsecc_1 Value:0xc0579c24e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.402466349s EvaluationString:[ var='B' labels={unit_serial=10001, vsecc=vsecc_1} value=40 ], [ var='C' labels={unit_serial=10001, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10001, vsecc=vsecc_2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10001, vsecc=vsecc_2 Value:0xc0579c2578} C:{Var:C Labels:unit_serial=10001, vsecc=vsecc_2 Value:0xc0579c25a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.402489049s EvaluationString:[ var='B' labels={unit_serial=10001, vsecc=vsecc_2} value=28 ], [ var='C' labels={unit_serial=10001, vsecc=vsecc_2} value=0 ]} {Instance:unit_serial=10001, vsecc=vsecc_3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10001, vsecc=vsecc_3 Value:0xc0579c2628} C:{Var:C Labels:unit_serial=10001, vsecc=vsecc_3 Value:0xc0579c2660}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.402497549s EvaluationString:[ var='B' labels={unit_serial=10001, vsecc=vsecc_3} value=28 ], [ var='C' labels={unit_serial=10001, vsecc=vsecc_3} value=0 ]} {Instance:unit_serial=10001, vsecc=vsecc_4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10001, vsecc=vsecc_4 Value:0xc0579c26f8} C:{Var:C Labels:unit_serial=10001, vsecc=vsecc_4 Value:0xc0579c2740}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.402504549s EvaluationString:[ var='B' labels={unit_serial=10001, vsecc=vsecc_4} value=22 ], [ var='C' labels={unit_serial=10001, vsecc=vsecc_4} value=0 ]} {Instance:unit_serial=10004, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10004, vsecc=vsecc_1 Value:0xc0579c27b8} C:{Var:C Labels:unit_serial=10004, vsecc=vsecc_1 Value:0xc0579c27f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.402511949s EvaluationString:[ var='B' labels={unit_serial=10004, vsecc=vsecc_1} value=33 ], [ var='C' labels={unit_serial=10004, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10004, vsecc=vsecc_2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10004, vsecc=vsecc_2 Value:0xc0579c28b8} C:{Var:C Labels:unit_serial=10004, vsecc=vsecc_2 Value:0xc0579c2910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40251995s EvaluationString:[ var='B' labels={unit_serial=10004, vsecc=vsecc_2} value=33 ], [ var='C' labels={unit_serial=10004, vsecc=vsecc_2} value=0 ]} {Instance:unit_serial=10004, vsecc=vsecc_3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10004, vsecc=vsecc_3 Value:0xc0579c2c78} C:{Var:C Labels:unit_serial=10004, vsecc=vsecc_3 Value:0xc0579c2ca0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40252775s EvaluationString:[ var='B' labels={unit_serial=10004, vsecc=vsecc_3} value=12 ], [ var='C' labels={unit_serial=10004, vsecc=vsecc_3} value=0 ]} {Instance:unit_serial=10013, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10013, vsecc=vsecc_1 Value:0xc0579c2d48} C:{Var:C Labels:unit_serial=10013, 
vsecc=vsecc_1 Value:0xc0579c2da0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40253785s EvaluationString:[ var='B' labels={unit_serial=10013, vsecc=vsecc_1} value=13 ], [ var='C' labels={unit_serial=10013, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10013, vsecc=vsecc_2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10013, vsecc=vsecc_2 Value:0xc0579c2e50} C:{Var:C Labels:unit_serial=10013, vsecc=vsecc_2 Value:0xc0579c2e28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40254425s EvaluationString:[ var='B' labels={unit_serial=10013, vsecc=vsecc_2} value=23 ], [ var='C' labels={unit_serial=10013, vsecc=vsecc_2} value=0 ]} {Instance:unit_serial=10013, vsecc=vsecc_3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10013, vsecc=vsecc_3 Value:0xc0579c2f50} C:{Var:C Labels:unit_serial=10013, vsecc=vsecc_3 Value:0xc0579c2f00}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40255215s EvaluationString:[ var='B' labels={unit_serial=10013, vsecc=vsecc_3} value=15 ], [ var='C' labels={unit_serial=10013, vsecc=vsecc_3} value=0 ]} {Instance:unit_serial=10014, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10014, vsecc=vsecc_1 Value:0xc0579c2fe0} C:{Var:C Labels:unit_serial=10014, vsecc=vsecc_1 Value:0xc0579c3030}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40256055s EvaluationString:[ var='B' labels={unit_serial=10014, vsecc=vsecc_1} value=16 ], [ var='C' labels={unit_serial=10014, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10014, vsecc=vsecc_2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10014, vsecc=vsecc_2 Value:0xc0579c3108} C:{Var:C Labels:unit_serial=10014, vsecc=vsecc_2 Value:0xc0579c3150}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40256685s EvaluationString:[ var='B' labels={unit_serial=10014, vsecc=vsecc_2} value=21 ], [ var='C' labels={unit_serial=10014, vsecc=vsecc_2} value=0 ]} {Instance:unit_serial=10014, vsecc=vsecc_3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10014, vsecc=vsecc_3 Value:0xc0579c31e8} C:{Var:C Labels:unit_serial=10014, vsecc=vsecc_3 Value:0xc0579c32c0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40257345s EvaluationString:[ var='B' labels={unit_serial=10014, vsecc=vsecc_3} value=18 ], [ var='C' labels={unit_serial=10014, vsecc=vsecc_3} value=0 ]} {Instance:unit_serial=10015, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10015, vsecc=vsecc_1 Value:0xc0579c3340} C:{Var:C Labels:unit_serial=10015, vsecc=vsecc_1 Value:0xc0579c3540}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40257995s EvaluationString:[ var='B' labels={unit_serial=10015, vsecc=vsecc_1} value=35 ], [ var='C' labels={unit_serial=10015, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10015, vsecc=vsecc_3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10015, vsecc=vsecc_3 Value:0xc0579c35d0} C:{Var:C Labels:unit_serial=10015, vsecc=vsecc_3 Value:0xc0579c3610}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40258655s EvaluationString:[ var='B' labels={unit_serial=10015, vsecc=vsecc_3} value=30 ], [ var='C' labels={unit_serial=10015, vsecc=vsecc_3} value=0 ]} {Instance:unit_serial=10015, vsecc=vsecc_4 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10015, vsecc=vsecc_4 Value:0xc0579c36a8} C:{Var:C Labels:unit_serial=10015, 
vsecc=vsecc_4 Value:0xc0579c36e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40259315s EvaluationString:[ var='B' labels={unit_serial=10015, vsecc=vsecc_4} value=23 ], [ var='C' labels={unit_serial=10015, vsecc=vsecc_4} value=0 ]} {Instance:unit_serial=10016, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10016, vsecc=vsecc_1 Value:0xc0579c37b0} C:{Var:C Labels:unit_serial=10016, vsecc=vsecc_1 Value:0xc0579c3768}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40259975s EvaluationString:[ var='B' labels={unit_serial=10016, vsecc=vsecc_1} value=22 ], [ var='C' labels={unit_serial=10016, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10016, vsecc=vsecc_2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10016, vsecc=vsecc_2 Value:0xc0579c3828} C:{Var:C Labels:unit_serial=10016, vsecc=vsecc_2 Value:0xc0579c3870}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40260645s EvaluationString:[ var='B' labels={unit_serial=10016, vsecc=vsecc_2} value=22 ], [ var='C' labels={unit_serial=10016, vsecc=vsecc_2} value=0 ]} {Instance:unit_serial=10016, vsecc=vsecc_3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10016, vsecc=vsecc_3 Value:0xc0579c3950} C:{Var:C Labels:unit_serial=10016, vsecc=vsecc_3 Value:0xc0579c3910}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40261545s EvaluationString:[ var='B' labels={unit_serial=10016, vsecc=vsecc_3} value=31 ], [ var='C' labels={unit_serial=10016, vsecc=vsecc_3} value=0 ]} {Instance:unit_serial=10017, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10017, vsecc=vsecc_1 Value:0xc0579c39f8} C:{Var:C Labels:unit_serial=10017, vsecc=vsecc_1 Value:0xc0579c3a20}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40262525s EvaluationString:[ var='B' labels={unit_serial=10017, vsecc=vsecc_1} value=29 ], [ var='C' labels={unit_serial=10017, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10017, vsecc=vsecc_2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10017, vsecc=vsecc_2 Value:0xc0579c3b00} C:{Var:C Labels:unit_serial=10017, vsecc=vsecc_2 Value:0xc0579c3a78}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40263205s EvaluationString:[ var='B' labels={unit_serial=10017, vsecc=vsecc_2} value=28 ], [ var='C' labels={unit_serial=10017, vsecc=vsecc_2} value=0 ]} {Instance:unit_serial=10019, vsecc=vsecc_1 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10019, vsecc=vsecc_1 Value:0xc032420008} C:{Var:C Labels:unit_serial=10019, vsecc=vsecc_1 Value:0xc032420070}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40263855s EvaluationString:[ var='B' labels={unit_serial=10019, vsecc=vsecc_1} value=36 ], [ var='C' labels={unit_serial=10019, vsecc=vsecc_1} value=0 ]} {Instance:unit_serial=10019, vsecc=vsecc_2 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10019, vsecc=vsecc_2 Value:0xc032420130} C:{Var:C Labels:unit_serial=10019, vsecc=vsecc_2 Value:0xc032420100}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40264605s EvaluationString:[ var='B' labels={unit_serial=10019, vsecc=vsecc_2} value=11 ], [ var='C' labels={unit_serial=10019, vsecc=vsecc_2} value=0 ]} {Instance:unit_serial=10019, vsecc=vsecc_3 State:Normal Error: Results:map[] Values:map[B:{Var:B Labels:unit_serial=10019, vsecc=vsecc_3 Value:0xc0324201d8} C:{Var:C Labels:unit_serial=10019, 
vsecc=vsecc_3 Value:0xc032420220}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40265485s EvaluationString:[ var='B' labels={unit_serial=10019, vsecc=vsecc_3} value=35 ], [ var='C' labels={unit_serial=10019, vsecc=vsecc_3} value=0 ]}]" duration=51.976146ms + level=debug ts=2024-05-29T13:44:13.404963834Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=112387 slug=lucidhq version=1 fingerprint=8ef7c640ee14f863 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.404863017Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40461261s EvaluationString:}]" duration=18.299679ms + level=debug ts=2024-05-29T13:44:13.404895765Z caller=remote_instance_store.go:51 user=171235 slug=circleslabs msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.404430699Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.404235388Z caller=remote_image_capturer.go:33 user=350551 slug=loopme rule_org_id=1 rule_uid=da0b4ace-045c-4a08-83d3-99a3ebec6bc8 msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=350551 slug=loopme instance="database_id=loopme-cloud-postgres:datastats-a2b936a" t=2024-05-29T13:44:13.404130416Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.scheduler user=350551 slug=loopme version=18 fingerprint=e6eb165335186109 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.403764827Z level=debug msg="Alert rule evaluated" results="[{Instance:database_id=loopme-cloud-postgres:datastats-a2b936a State:Alerting Error: Results:map[] Values:map[A:{Var:A Labels:database_id=loopme-cloud-postgres:datastats-a2b936a Value:0xc03c3ba070} B:{Var:B Labels:database_id=loopme-cloud-postgres:datastats-a2b936a Value:0xc03c3ba048}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.403255142s EvaluationString:[ var='A' labels={database_id=loopme-cloud-postgres:datastats-a2b936a} value=8 ], [ var='B' labels={database_id=loopme-cloud-postgres:datastats-a2b936a} value=1 ]}]" duration=180.61108ms + logger=ngalert.state.manager.persist user=432323 slug=lithic t=2024-05-29T13:44:13.404141925Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager.persist user=798556 slug=zenithhr t=2024-05-29T13:44:13.404028482Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.199344ms + logger=ngalert.state.manager user=765874 slug=rhwstaging instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.403878822Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=765874 slug=rhwstaging t=2024-05-29T13:44:13.403859752Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=309009 slug=elestyle instance="datasource_uid=zxr_3eR4z, ref_id=A" t=2024-05-29T13:44:13.403945254Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=309009 slug=elestyle t=2024-05-29T13:44:13.403918269Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=765874 slug=rhwstaging version=1 fingerprint=c9e43b86f3f598b8 attempt=1 now=2024-05-29T13:44:10Z 
t=2024-05-29T13:44:13.40377248Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.403592394s EvaluationString:}]" duration=7.206703ms + logger=ngalert.scheduler user=309009 slug=elestyle version=1 fingerprint=ae67cb42e468b42b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.403866337Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=zxr_3eR4z, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.403639058s EvaluationString:}]" duration=157.921383ms + level=debug ts=2024-05-29T13:44:13.403591304Z caller=remote_instance_store.go:51 user=753403 slug=romich msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.403617764Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.scheduler user=430961 slug=solifi version=1 fingerprint=3b675ec15f81eaf3 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.403412944Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.40311085s EvaluationString:}]" duration=138.053193ms + level=debug ts=2024-05-29T13:44:13.403270242Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.403335176Z caller=remote_instance_store.go:51 user=517596 slug=datar msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.403234234Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.403321146Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=517596 slug=datar instance="datasource_uid=uleqBno4z, ref_id=A" t=2024-05-29T13:44:13.403267457Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.403276397Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.403270547Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=708531 slug=dooshimagbamwuan instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.403257597Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.391072604Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kb1g89z9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.402627152Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kb162x17-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.402539561Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kay2lgch-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.402332109Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kay2lgch-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.402307019Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=44181 slug=prospa t=2024-05-29T13:44:13.402326143Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=44181 slug=prospa version=1 fingerprint=391300c081c85627 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.402263187Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.401968731s EvaluationString:}]" duration=41.438449ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kay2lgch-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.402236248Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kay2lgch-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.402179717Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.402019503Z caller=remote_instance_store.go:51 user=349246 slug=metricgamingdev msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.402075399Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=6f9787be1e093b3f attempt=1 
now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.402072034Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.401849767s EvaluationString:}]" duration=236.836465ms + level=debug ts=2024-05-29T13:44:13.402013203Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kap4hery-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.402092267Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kap4hery-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.402021346Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.401879266Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kam643b7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.401836824Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.401781607Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=250150 slug=bizagi version=58 fingerprint=b17baefe58790d4d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.401680651Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.401406137s EvaluationString:}]" duration=585.022086ms + logger=ngalert.state.manager.persist user=174675 slug=journalprod t=2024-05-29T13:44:13.40165647Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kagz60ou-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.4014171Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-kafpzk9v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.401290718Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kafpzk9v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.401108936Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.401012355Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.400871151Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.400755716Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.400649858Z caller=remote_instance_store.go:51 user=208505 slug=liberachat msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=447873 slug=pn0625test01 t=2024-05-29T13:44:13.400621414Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.314331ms + level=debug ts=2024-05-29T13:44:13.400528757Z caller=remote_instance_store.go:51 user=206439 slug=relaypro msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=672210 slug=noteb5 t=2024-05-29T13:44:13.40044667Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=info ts=2024-05-29T13:44:13.400414681Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bdbhspy0lbh1cd alerts=1 + logger=ngalert.scheduler user=672210 slug=noteb5 version=30 fingerprint=930278c201dbecb4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.400303678Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A,C State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.399872774s EvaluationString:}]" duration=24.491191ms + level=debug ts=2024-05-29T13:44:13.400232448Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=806229 slug=simplisafe t=2024-05-29T13:44:13.400020375Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.811981ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kady99g2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.400096076Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.400111787Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager 
user=426229 slug=accelbyte instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.400070084Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kady99g2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.400035435Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kady99g2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.399925024Z level=debug msg="Keeping state" state=Normal + level=info ts=2024-05-29T13:44:13.400049956Z caller=remote_alert_sender.go:94 user=22398 slug=sunfolding host=sunfolding-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.37.117:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=bd0d467d-c617-4437-abb4-7127d3dadf71 alerts=1 + logger=ngalert.scheduler user=426229 slug=accelbyte version=288 fingerprint=51c18c856583118f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.399949551Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.399693247s EvaluationString:}]" duration=583.713679ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kady99g2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.399893424Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.399848037Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kac0oqb2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.399774153Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.399662656Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-kac0oqb2-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.399645941Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.399538808Z caller=remote_instance_store.go:51 user=180994 slug=cgmonitor msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=teleport, env=production, namespace=teleport-6, pod=teleport-6-969995c5c-5lvgd" t=2024-05-29T13:44:13.399395782Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ka66i6pv-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.399367279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=teleport, env=office, namespace=teleport-6, pod=teleport-6-568f8bf9c4-jp4mj" t=2024-05-29T13:44:13.39927556Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ka66i6pv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.399199857Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ka66i6pv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.399068166Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ka25mzw4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.398883484Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.398982019Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.388945832Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ka25mzw4-termination-metadata-pv, phase=Available, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.39854093Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ka1qwtum-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.398211457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=strategy-planner, env=production, namespace=atlas-agent, pod=strategy-planner-6bddf4fdd5-s9zwq" t=2024-05-29T13:44:13.398900989Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.398805839Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=strategy-planner, env=office, namespace=atlas-agent, pod=strategy-planner-597f97698-58f7n" t=2024-05-29T13:44:13.398802949Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-77f77d979f-67qsh" t=2024-05-29T13:44:13.398624556Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=strategy-evaluator, env=production, namespace=atlas-agent, pod=strategy-evaluator-74c5c69c84-wxldh" t=2024-05-29T13:44:13.398563261Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=shell, env=production, namespace=legacy, pod=shell-pnh38y" t=2024-05-29T13:44:13.398454015Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=disk_monitor_nvme, drive=nvme1, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, localname=DB6 Statsd, measurement=percentage_used_percent, orgunit=TELE" t=2024-05-29T13:44:13.39829333Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.398280888Z caller=remote_instance_store.go:51 user=114516 slug=heliumdashboard msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=reflector, env=production, namespace=kube-system, pod=reflector-5cb5865b8f-pgf4v" t=2024-05-29T13:44:13.398237102Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.398054574Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=reflector, env=office, namespace=kube-system, pod=reflector-5cb5865b8f-8wqhf" t=2024-05-29T13:44:13.398169547Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=114516 slug=heliumdashboard version=75 fingerprint=23c2fb4e15b98bcc 
attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.398026319Z level=debug msg="Alert rule evaluated" results="[{Instance:Env=mainnet, Role=index, Stack=iot, agent_hostname=mainnet-iot-index0-oregon, instance=mainnet-iot-index0-oregon:19002, job=reward_index_metrics State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:Env=mainnet, Role=index, Stack=iot, agent_hostname=mainnet-iot-index0-oregon, instance=mainnet-iot-index0-oregon:19002, job=reward_index_metrics Value:0xc029519588} C:{Var:C Labels:Env=mainnet, Role=index, Stack=iot, agent_hostname=mainnet-iot-index0-oregon, instance=mainnet-iot-index0-oregon:19002, job=reward_index_metrics Value:0xc0295195e0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.397647208s EvaluationString:[ var='A' labels={Env=mainnet, Role=index, Stack=iot, agent_hostname=mainnet-iot-index0-oregon, instance=mainnet-iot-index0-oregon:19002, job=reward_index_metrics} value=41916 ], [ var='C' labels={Env=mainnet, Role=index, Stack=iot, agent_hostname=mainnet-iot-index0-oregon, instance=mainnet-iot-index0-oregon:19002, job=reward_index_metrics} value=0 ]}]" duration=11.355989ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-8546f6d978-98dkl" t=2024-05-29T13:44:13.398097885Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet instance="__name__=disk_monitor_nvme, drive=nvme0, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, localname=DB6 Statsd, measurement=percentage_used_percent, orgunit=TELE" t=2024-05-29T13:44:13.398006926Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-8546f6d978-98dkl" t=2024-05-29T13:44:13.39808879Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.398036618Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-5b67f5b4d5-tczkp" t=2024-05-29T13:44:13.397970191Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.397907075Z caller=remote_instance_store.go:51 user=114492 slug=railsbank msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.397850265Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-5b67f5b4d5-gg5jn" t=2024-05-29T13:44:13.39787943Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank t=2024-05-29T13:44:13.397793525Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=usfoods-norwich, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-5b67f5b4d5-dcrvt" 
t=2024-05-29T13:44:13.397772789Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=560336 slug=powernet t=2024-05-29T13:44:13.397677421Z level=debug msg="State manager processing evaluation results" resultCount=4 + logger=ngalert.scheduler user=560336 slug=powernet version=4 fingerprint=3c34299c458714bf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.397474318Z level=debug msg="Alert rule evaluated" results="[{Instance:__name__=disk_monitor_nvme, drive=nvme0, host=db1.telecomx.dk, host_short=db1, instance=db1.telecomx.dk:9110, job=custom, localname=DB1 Statsd, measurement=percentage_used_percent, orgunit=TELE State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_monitor_nvme, drive=nvme0, host=db1.telecomx.dk, host_short=db1, instance=db1.telecomx.dk:9110, job=custom, localname=DB1 Statsd, measurement=percentage_used_percent, orgunit=TELE Value:0xc014804ba0} B:{Var:B Labels:__name__=disk_monitor_nvme, drive=nvme0, host=db1.telecomx.dk, host_short=db1, instance=db1.telecomx.dk:9110, job=custom, localname=DB1 Statsd, measurement=percentage_used_percent, orgunit=TELE Value:0xc014804c90} C:{Var:C Labels:__name__=disk_monitor_nvme, drive=nvme0, host=db1.telecomx.dk, host_short=db1, instance=db1.telecomx.dk:9110, job=custom, localname=DB1 Statsd, measurement=percentage_used_percent, orgunit=TELE Value:0xc014804ab0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.400016714s EvaluationString:[ var='A' labels={__name__=disk_monitor_nvme, drive=nvme0, host=db1.telecomx.dk, host_short=db1, instance=db1.telecomx.dk:9110, job=custom, localname=DB1 Statsd, measurement=percentage_used_percent, orgunit=TELE} value=6 ], [ var='B' labels={__name__=disk_monitor_nvme, drive=nvme0, host=db1.telecomx.dk, host_short=db1, instance=db1.telecomx.dk:9110, job=custom, localname=DB1 Statsd, measurement=percentage_used_percent, orgunit=TELE} value=6 ], [ var='C' labels={__name__=disk_monitor_nvme, drive=nvme0, host=db1.telecomx.dk, host_short=db1, instance=db1.telecomx.dk:9110, job=custom, localname=DB1 Statsd, measurement=percentage_used_percent, orgunit=TELE} value=0 ]} {Instance:__name__=disk_monitor_nvme, drive=nvme0, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, localname=DB6 Statsd, measurement=percentage_used_percent, orgunit=TELE State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:__name__=disk_monitor_nvme, drive=nvme0, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, localname=DB6 Statsd, measurement=percentage_used_percent, orgunit=TELE Value:0xc0148051d8} B:{Var:B Labels:__name__=disk_monitor_nvme, drive=nvme0, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, localname=DB6 Statsd, measurement=percentage_used_percent, orgunit=TELE Value:0xc014804e88} C:{Var:C Labels:__name__=disk_monitor_nvme, drive=nvme0, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, localname=DB6 Statsd, measurement=percentage_used_percent, orgunit=TELE Value:0xc014804fd8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.400077113s EvaluationString:[ var='A' labels={__name__=disk_monitor_nvme, drive=nvme0, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, localname=DB6 Statsd, measurement=percentage_used_percent, orgunit=TELE} value=6 ], [ var='B' labels={__name__=disk_monitor_nvme, drive=nvme0, host=db6.telecomx.dk, host_short=db6, instance=db6.telecomx.dk:9110, job=custom, 
msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=perf-nemanja-aa, container=load-autoaccess-entities-job, pod=load-autoaccess-entities-job-28616462-nx9q7" t=2024-05-29T13:44:13.382788984Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=manager, env=office, namespace=flux-system, pod=kustomize-controller-75bb97945d-2gl7g" t=2024-05-29T13:44:13.382805422Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=perf-nemanja-aa, container=load-autoaccess-entities-job, pod=load-autoaccess-entities-job-28616462-nx9q7" t=2024-05-29T13:44:13.3827707Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=22398 slug=sunfolding t=2024-05-29T13:44:13.382682698Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.scheduler user=22398 slug=sunfolding version=1 fingerprint=86e9ae668b7f65d7 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.382544291Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-sunfolding, ref_id=B State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.3819508s EvaluationString:}]" duration=21.281032ms + level=debug ts=2024-05-29T13:44:13.382514473Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=perf-nemanja-aa, container=load-autoaccess-entities-job, pod=load-autoaccess-entities-job-28616449-l99v2" t=2024-05-29T13:44:13.382541552Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=manager, env=office, namespace=flux-system, pod=image-reflector-controller-78c5d97ff5-sw9lt" t=2024-05-29T13:44:13.382526629Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9v94b6k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.382420184Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=perf-nemanja-aa, container=load-autoaccess-entities-job, pod=load-autoaccess-entities-job-28616443-t8667" t=2024-05-29T13:44:13.382417415Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9td2msh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.382380904Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager 
user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9td2msh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.382357184Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9td2msh-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.382330123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=manager, env=office, namespace=flux-system, pod=image-automation-controller-54bf875b97-tslrn" t=2024-05-29T13:44:13.382333949Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=perf-iga-05071, container=committer, pod=config-committer-55c77bbcd4-s2hxp" t=2024-05-29T13:44:13.382195713Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.382119462Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9qc3019-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.382131051Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=local-path-provisioner, env=office, namespace=local-path-storage, pod=local-path-provisioner-7fdb4745c6-62j7f" t=2024-05-29T13:44:13.382035695Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9notrfa-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.38195717Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=gov-rapid-17, container=backup-platform, pod=backup-platform-28616445-4m4qj" t=2024-05-29T13:44:13.381949142Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=fr18892-01, container=orch-sequencer, pod=orch-sequencer-7d76bd5796-bmzsf" t=2024-05-29T13:44:13.381852006Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z 
next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=fr18892-01, container=orch-sequencer, pod=orch-sequencer-7d76bd5796-bmzsf" t=2024-05-29T13:44:13.381838511Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9nbm4zy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.381785998Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=kube-state-metrics, env=office, namespace=kube-system, pod=kube-state-metrics-7b66c9956f-rp727" t=2024-05-29T13:44:13.381850346Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=fr18613-06, container=orch-sequencer, pod=orch-sequencer-7d76bd5796-ktckg" t=2024-05-29T13:44:13.381657658Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9n4c04h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.381607746Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.381570777Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:13.381508405Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.381377326Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:13.38133127Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=fr18613-06, container=cloud-sql-proxy, pod=orch-changelog-6cb75d569c-gjxtk" t=2024-05-29T13:44:13.381253151Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k9knkoi0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.381246082Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=kube-controller-manager, env=office, namespace=kube-system, 
pod=kube-controller-manager-uscold-denton-9a74ce60-a191b628" t=2024-05-29T13:44:13.381161974Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=kube-apiserver, env=office, namespace=kube-system, pod=kube-apiserver-uscold-denton-9a74ce60-a191b628" t=2024-05-29T13:44:13.380956538Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=112387 slug=lucidhq instance="aggregatedBy=sum, name=sumSeries(perSecond(prd-prd.logstash.prd-euc1-prd-eks-media-measurement-logging.*.*.*.dlq.*.*.logstash_events.filtered)) A" t=2024-05-29T13:44:13.380883123Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=emeasupport-upper, container=org-promo, pod=org-promo-7f46bcb576-7dmkr" t=2024-05-29T13:44:13.380804313Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.380778937Z caller=remote_instance_store.go:51 user=476835 slug=adevintashield msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=heartbeat, env=office, namespace=atlas-agent, pod=heartbeat-5786644d44-sbqn7" t=2024-05-29T13:44:13.380823227Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=476835 slug=adevintashield instance= t=2024-05-29T13:44:13.380721249Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k93iikm8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.380729357Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=476835 slug=adevintashield version=18 fingerprint=a236635ca9e692cf attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.380574311Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.380194222s EvaluationString:}]" duration=31.994646ms + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=emeasupport-upper, container=org-promo, pod=org-promo-74fdc74755-6zrbx" t=2024-05-29T13:44:13.380687236Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.380709297Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:13.38062408Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-g7cj4" t=2024-05-29T13:44:13.380573112Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=dx-kl04, container=manager, pod=gatekeeper-controller-manager-68466dc598-zz7h7" t=2024-05-29T13:44:13.380473722Z level=debug 
msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8ts2yif-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.380355363Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=dx-kl04, container=manager, pod=gatekeeper-controller-manager-68466dc598-vldbm" t=2024-05-29T13:44:13.38031488Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.380285185Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616400-w9d72" t=2024-05-29T13:44:13.380300355Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8ts2yif-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.380315683Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8ts2yif-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.380238642Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=dx-kl04, container=manager, pod=gatekeeper-controller-manager-68466dc598-vgdgw" t=2024-05-29T13:44:13.380191069Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.380138401Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616040-rjr4g" t=2024-05-29T13:44:13.380179337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=b163fba4ca620935 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.38010658Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=FRANKFURT Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc014fb9f30} Threshold:{Var:Threshold Labels: Value:0xc014fb9f38} compare:{Var:compare Labels:aggregatedBy=sum, name=FRANKFURT Query Value:0xc08c0c6028} sum:{Var:sum Labels:aggregatedBy=sum, name=FRANKFURT Query Value:0xc014fb9f08}] 
EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.379889856s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=2 ], [ var='compare' labels={aggregatedBy=sum, name=FRANKFURT Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=FRANKFURT Query} value=0 ]}]" duration=257.016412ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8scqddk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.38008642Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.38005514Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8scqddk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.38005346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616040-rjr4g" t=2024-05-29T13:44:13.380104772Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.380065773Z caller=remote_instance_store.go:51 user=456946 slug=menlosecurityredge msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.380025883Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.380027636Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=dx-kl04, container=cert-manager-cainjector, pod=cert-manager-cainjector-998dcb955-v62pn" t=2024-05-29T13:44:13.380068345Z level=debug msg="Setting next state" handler=resultAlerting + level=debug ts=2024-05-29T13:44:13.3800315Z caller=remote_instance_store.go:51 user=432323 slug=lithic msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.379971029Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=432323 slug=lithic instance= t=2024-05-29T13:44:13.379977689Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28615680-xcj2f" t=2024-05-29T13:44:13.379923077Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8qo76tj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379879538Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.379905029Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:13.379869414Z caller=remote_instance_store.go:51 user=343338 slug=f5sdc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8qo76tj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379850218Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.379819985Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=avi-oct2-all, container=backup-platform, pod=backup-platform-28616445-xjv2p" t=2024-05-29T13:44:13.379806937Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8qo76tj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379808578Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8qo76tj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379798808Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=avi-oct2-all, container=backup-platform, pod=backup-platform-28616445-xjv2p" t=2024-05-29T13:44:13.379796321Z level=debug msg="Setting next state" handler=resultAlerting + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8qnm41j-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379743657Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.379670805Z caller=remote_instance_store.go:51 user=71676 slug=lemonbeat msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=384712 slug=nearinc t=2024-05-29T13:44:13.379720335Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + 
level=debug ts=2024-05-29T13:44:13.379722268Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=430961 slug=solifi instance= t=2024-05-29T13:44:13.379629958Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8qnm41j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379637296Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.379582952Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.379577553Z caller=remote_image_capturer.go:61 user=384712 slug=nearinc rule_org_id=1 rule_uid=b294b46a-65cf-4ee9-91fe-93421b5d083f dashboard=666YYqJnz panel=11 msg="skipping screenshot for tenant" error="rpc error: code = Code(422) desc = screenshots unavailable" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8lf2mld-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379572185Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.379629467Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8lf2mld-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379549735Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=327842 slug=exabeam t=2024-05-29T13:44:13.379538939Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=327842 slug=exabeam instance= t=2024-05-29T13:44:13.37951618Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=avi-oct18-all, container=manager, pod=gatekeeper-controller-manager-5974fb86d7-dzxmx" t=2024-05-29T13:44:13.379520973Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-k8kzwnus-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379458214Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8kzwnus-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379429184Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=114492 slug=railsbank t=2024-05-29T13:44:13.37938749Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=24.511776ms + logger=ngalert.state.manager.persist user=288032 slug=dapperlabssre t=2024-05-29T13:44:13.379399428Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=73.798946ms + logger=ngalert.state.manager.persist user=313382 slug=hyai t=2024-05-29T13:44:13.379383841Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.379420708Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + logger=ngalert.state.manager user=313382 slug=hyai instance="resource.label.project_id=hyai-development-eec1, resource.type=cloudsql_database" t=2024-05-29T13:44:13.379365406Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.379369844Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=313382 slug=hyai instance="resource.label.project_id=hyai-development-eec1, resource.type=cloudsql_database" t=2024-05-29T13:44:13.379350117Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=71697 slug=lovelysystems instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.379336613Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.379370542Z caller=remote_instance_store.go:51 user=171235 slug=circleslabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8kwdngb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379299992Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.379294995Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=argo-ci, container=orch-sequencer, pod=orch-sequencer-7d76bd5796-mmkf9" t=2024-05-29T13:44:13.37930083Z level=debug msg="Keeping state" state=Alerting previous_ends_at=2024-05-29T13:47:10Z 
next_ends_at=2024-05-29T13:48:10Z + level=debug ts=2024-05-29T13:44:13.379309098Z caller=remote_image_capturer.go:33 user=251760 slug=forgerock rule_org_id=1 rule_uid=cdewjcstdhdkwd msg="Cannot take screenshot for alert rule as it is not associated with a dashboard" + level=debug ts=2024-05-29T13:44:13.379240176Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8kwdngb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379218482Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=158536 slug=clearsaleantifraude t=2024-05-29T13:44:13.37915599Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=31.252619ms + logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.37924807Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=171235 slug=circleslabs instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.379241802Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8kwdngb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.379194011Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k8d7q1rw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.37911171Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=251760 slug=forgerock t=2024-05-29T13:44:13.379014438Z level=debug msg="State manager processing evaluation results" resultCount=72 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=config-syncer, env=office, namespace=kube-system, pod=config-syncer-7f8d8bc659-l7s64" t=2024-05-29T13:44:13.379095922Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=config-syncer, env=office, namespace=kube-system, pod=config-syncer-7f8d8bc659-l7s64" t=2024-05-29T13:44:13.3790381Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.379023124Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=245291 slug=pismo 
instance="datasource_uid=grafanacloud-logs, ref_id=Query" t=2024-05-29T13:44:13.379003923Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k807ej11-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.378904358Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.378855389Z caller=remote_instance_store.go:51 user=159781 slug=suncornoc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k807ej11-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.378847728Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=cloud-commander, env=office, namespace=atlas-agent, pod=cloud-commander-6bb8cffd67-4rlxx" t=2024-05-29T13:44:13.378865628Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=384712 slug=nearinc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378787668Z level=debug msg="Execution no data state is Alerting" handler=resultAlerting previous_handler=resultNoData + logger=ngalert.state.manager user=384712 slug=nearinc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378778547Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=cloud-commander, env=office, namespace=atlas-agent, pod=cloud-commander-6bb8cffd67-4rlxx" t=2024-05-29T13:44:13.378850136Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378760088Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378754388Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378747088Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378734588Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378721888Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.37871766Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378614187Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378601887Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=uscold-denton, container=cilium-operator, env=office, namespace=kube-system, pod=cilium-operator-77bb5b96c8-sq8cr" t=2024-05-29T13:44:13.378693249Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378596886Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378541786Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k7ya3qrr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.378567605Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378495885Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378484185Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378466185Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k7wb3r8h-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.378503614Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378401984Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378394284Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378374284Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData 
+ logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378362184Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k7wb3r8h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.378356583Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.378315211Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378305183Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k7va3gih-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.378295482Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k7va3gih-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.378235171Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378095581Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378088881Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378083081Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378065281Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.378059981Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=159781 slug=suncornoc instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.37800948Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, 
user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=kube-proxy, env=production, namespace=kube-system, pod=kube-proxy-76xzr" t=2024-05-29T13:44:13.368187279Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k11nkg61-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.368147528Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k11nkg61-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.368115648Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.368065484Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=kube-eventrouter, env=production, namespace=kube-system, pod=eventrouter-6f9ddb7fc6-khkm8" t=2024-05-29T13:44:13.368076741Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0wohtb1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367981086Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0wohtb1-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367928776Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0v3ibe0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367805195Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.367705907Z caller=remote_instance_store.go:51 user=196013 slug=inmediasoftware msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=kube-apiserver, env=production, namespace=kube-system, 
pod=kube-apiserver-us-foods-indianapolis-building-1" t=2024-05-29T13:44:13.367785027Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=heartbeat, env=production, namespace=atlas-agent, pod=heartbeat-86ddfc6f7c-2mw49" t=2024-05-29T13:44:13.367705012Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0r1tsy0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367626943Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0r1tsy0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367598902Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0npo6x9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367554272Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0npo6x9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.3673973Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0ixb883-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367285219Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-28616040-fgvfs" t=2024-05-29T13:44:13.367284822Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-28615680-mnwwn" t=2024-05-29T13:44:13.367164462Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager.persist user=446686 slug=coinfx t=2024-05-29T13:44:13.367084737Z level=debug msg="Saving alert states done" count=17 max_state_save_concurrency=1 duration=234.634415ms + level=debug ts=2024-05-29T13:44:13.36704718Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=darkwing, env=production, namespace=darkwing, pod=darkwing-7dd9c85684-wtd7d" t=2024-05-29T13:44:13.367035987Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=18335 slug=semaphore instance= t=2024-05-29T13:44:13.367004585Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0i51o3e-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.367001136Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=18335 slug=semaphore instance= t=2024-05-29T13:44:13.366996828Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.366934076Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=18335 slug=semaphore version=2 fingerprint=b41f503f11639bd1 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.366915163Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.366651341s EvaluationString:}]" duration=35.185193ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k0ciy21u-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.366933136Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=coredns, env=production, namespace=kube-system, pod=coredns-77c7b7d9b-wfbd4" t=2024-05-29T13:44:13.366914976Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=coredns, env=production, namespace=kube-system, pod=coredns-77c7b7d9b-r9g8r" t=2024-05-29T13:44:13.366807285Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k08fuqzi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.366717913Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k08fuqzi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.366693553Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.366654172Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k070n6ne-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.366600702Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.366620476Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=config-syncer, env=production, namespace=kube-system, pod=config-syncer-6fb787d9dc-2gwhf" t=2024-05-29T13:44:13.366527107Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k070n6ne-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.366496781Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-k070n6ne-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.366469711Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.366215796Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=306551 slug=teckresourcesalerts t=2024-05-29T13:44:13.366138053Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=17.052802ms + level=debug ts=2024-05-29T13:44:13.366125746Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:13.366076214Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=206107 slug=hydrolix instance="datasource_uid=fdicucphttr7ka, ref_id=A" t=2024-05-29T13:44:13.366070095Z level=debug msg="Execution no data state is Normal" handler=resultNormal 
previous_handler=resultNoData + logger=ngalert.scheduler user=206107 slug=hydrolix version=4 fingerprint=60a8a9489731cd1e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.365940899Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdicucphttr7ka, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.365665304s EvaluationString:}]" duration=52.366423ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzqi4h1b-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365978246Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.365815342Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.365529016Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzoz9qf4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365705733Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=atlasclient, env=production, namespace=legacy, pod=atlasclient-fd46dc49f-lbxxk" t=2024-05-29T13:44:13.36567023Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzkk2ytg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365636212Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzkk2ytg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365570192Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzkk2ytg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365511781Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzkk2ytg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365502661Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzi3lo3x-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.36546462Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.365421886Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=523906 slug=cyberark instance="ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster" t=2024-05-29T13:44:13.365397373Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.365319916Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzi3lo3x-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365352459Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=alarm-persister, env=production, namespace=atlas-agent, pod=alarm-persister-c4c957d56-pq4qg" t=2024-05-29T13:44:13.365353217Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzhmudqo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365254518Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzhmudqo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365225498Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzhmudqo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365185318Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-foods-fishers-building-1, container=alarm-evaluator, env=production, namespace=atlas-agent, pod=alarm-evaluator-67f974d447-wqtdl" t=2024-05-29T13:44:13.365236058Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzfi916o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.365110337Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.365140836Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzfi916o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364934625Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=teleport, env=production, namespace=teleport-6, pod=teleport-6-56fbd8d897-zkmjj" t=2024-05-29T13:44:13.364979724Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzenih37-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364858094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=strategy-planner, env=production, namespace=atlas-agent, pod=strategy-planner-dfc65f56f-pf6tb" t=2024-05-29T13:44:13.364819234Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=strategy-planner, env=production, namespace=atlas-agent, pod=strategy-planner-dfc65f56f-pf6tb" t=2024-05-29T13:44:13.36476806Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-jzenih37-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364720933Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.364600308Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=strategy-persister, env=production, namespace=atlas-agent, pod=strategy-persister-6ccb9ff696-2zx9x" t=2024-05-29T13:44:13.364651822Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=692721 slug=agrigateone instance="carrier_name=MSC, response_code=504" t=2024-05-29T13:44:13.36437751Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzdkyt8j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.36446616Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.364461651Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzav1wsl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.36442553Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzav1wsl-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364395859Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jzav1wsl-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364264268Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=shell, env=production, namespace=legacy, pod=shell-tschcs" t=2024-05-29T13:44:13.364339948Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=692721 slug=agrigateone t=2024-05-29T13:44:13.364274218Z level=debug msg="State manager processing evaluation results" resultCount=3 + 
level=debug ts=2024-05-29T13:44:13.364204399Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jywjxs9s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364132957Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jywjxs9s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364123336Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=restic, env=production, namespace=oas, pod=oas-linux-config-backup-28616400-62vqp" t=2024-05-29T13:44:13.364191545Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyuenoma-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.364068996Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.363840398Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.363838258Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.363727574Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=restic, env=production, namespace=oas, pod=oas-linux-config-backup-28615680-tkcjz" t=2024-05-29T13:44:13.363835686Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.363789246Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=430961 slug=solifi t=2024-05-29T13:44:13.363735731Z level=debug msg="Saving alert states done" count=2 max_state_save_concurrency=1 duration=28.252439ms + level=debug ts=2024-05-29T13:44:13.363776047Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.363761921Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.363741811Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=696798 slug=mcv instance="aggregatedBy=sum, name=US CENTRAL Query" 
t=2024-05-29T13:44:13.363708879Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.363672898Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=374423 slug=bitburst t=2024-05-29T13:44:13.363538131Z level=debug msg="Saving alert states done" count=9 max_state_save_concurrency=1 duration=146.978506ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyl9momv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363587301Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyl9momv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363558791Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.363458725Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=ab57630910f37b7b attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.363486605Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=US CENTRAL Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc005cd7698} IgnoreBelow:{Var:IgnoreBelow Labels: Value:0xc005cd76d0} Threshold:{Var:Threshold Labels: Value:0xc005cd76d8} compare:{Var:compare Labels:aggregatedBy=sum, name=US CENTRAL Query Value:0xc005cd7648} sum:{Var:sum Labels:aggregatedBy=sum, name=US CENTRAL Query Value:0xc005cd7680}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.363136055s EvaluationString:[ var='Breaches' labels={} value=1 ], [ var='IgnoreBelow' labels={} value=100 ], [ var='Threshold' labels={} value=-20 ], [ var='compare' labels={aggregatedBy=sum, name=US CENTRAL Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=US CENTRAL Query} value=0 ]}]" duration=30.270567ms + level=debug ts=2024-05-29T13:44:13.363406032Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyksqeri-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363297858Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyksqeri-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363274218Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=163215 slug=tripadvisor t=2024-05-29T13:44:13.363361927Z level=debug msg="Saving alert states" count=2 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-6f774b4df9-pg44q" t=2024-05-29T13:44:13.363356867Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=163215 slug=tripadvisor instance="ip_address=10.195.142.56" t=2024-05-29T13:44:13.363341747Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=608555 slug=ias t=2024-05-29T13:44:13.363301608Z level=debug msg="Deleting alert states" count=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyksqeri-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363238667Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyiogxwk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363164987Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.363048206Z caller=remote_instance_store.go:51 user=350551 slug=loopme msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyiogxwk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363096306Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=opentelemetry-collector, env=production, namespace=open-telemetry, pod=collector-opentelemetry-collector-agent-5m6zd" t=2024-05-29T13:44:13.36313607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=opentelemetry-collector, env=production, namespace=open-telemetry, pod=collector-opentelemetry-collector-agent-5m6zd" t=2024-05-29T13:44:13.363095655Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-jyiogxwk-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363072186Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.363065895Z caller=remote_instance_store.go:51 user=613896 slug=mnitcommprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=613896 slug=mnitcommprod instance="datasource_uid=grafanacloud-logs, ref_id=errors" t=2024-05-29T13:44:13.363008993Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=613896 slug=mnitcommprod instance="datasource_uid=grafanacloud-logs, ref_id=errors" t=2024-05-29T13:44:13.363003393Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=613896 slug=mnitcommprod instance="datasource_uid=grafanacloud-logs, ref_id=errors" t=2024-05-29T13:44:13.362997093Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyiogxwk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.363042915Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=613896 slug=mnitcommprod instance="datasource_uid=grafanacloud-logs, ref_id=errors" t=2024-05-29T13:44:13.362980593Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=613896 slug=mnitcommprod instance="datasource_uid=grafanacloud-logs, ref_id=errors" t=2024-05-29T13:44:13.362973292Z level=debug msg="Setting next state" handler=resultNoData + level=debug ts=2024-05-29T13:44:13.36298937Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jyc9vib6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362987175Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.36297361Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=613896 slug=mnitcommprod version=12 fingerprint=fe13f8a061f8ee82 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.36288259Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-logs, ref_id=errors State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.351025499s EvaluationString:}]" duration=18.230755ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-jyc9bilp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362685642Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.362663515Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jy93jui2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.36255579Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.36252755Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.362466414Z caller=remote_instance_store.go:51 user=27737 slug=edfmancapital msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxsj28tn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362383749Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxsj28tn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362339438Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.36235103Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxrturk3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362201817Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=manager, env=production, namespace=flux-system, pod=source-controller-88fd68664-hkn9t" t=2024-05-29T13:44:13.362339219Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.362140479Z caller=remote_instance_store.go:51 user=778383 slug=nicolegbilaw msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=manager, env=production, namespace=flux-system, pod=notification-controller-5db88868fb-x6c8d" t=2024-05-29T13:44:13.36220309Z 
level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:13.362001414Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=23.870115ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxj760gi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362096436Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxj760gi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362066235Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.362039243Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxj760gi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.362025685Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxj760gi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361951364Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=manager, env=production, namespace=flux-system, pod=kustomize-controller-5f49cb697d-s8sz2" t=2024-05-29T13:44:13.361970284Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxgqpti3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361803613Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.36187041Z caller=remote_instance_store.go:51 user=94501 slug=datastax msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.36178449Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye 
instance="atlas_agent_name=us-cold-denton-building-1, container=manager, env=production, namespace=flux-system, pod=kustomize-controller-5f49cb697d-72nsc" t=2024-05-29T13:44:13.361824708Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.361688489Z caller=remote_instance_store.go:51 user=893151 slug=cmtdsnp msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.361707103Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=edbhspynrdkhtd alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxfiliup-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361646241Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.361557454Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=35.171959ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jxfiliup-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.36158099Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx97vg3o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361484449Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx97vg3o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361473939Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx8r3d0s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361406208Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=local-path-provisioner, 
env=production, namespace=local-path-storage, pod=local-path-provisioner-7fdb4745c6-vtngm" t=2024-05-29T13:44:13.361568673Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx8r3d0s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361394658Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx8r3d0s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361357058Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx8r3d0s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361342958Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.361373083Z caller=remote_instance_store.go:51 user=93308 slug=cede msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=kube-state-metrics, env=production, namespace=kube-system, pod=kube-state-metrics-85d79fb8db-nw6qc" t=2024-05-29T13:44:13.361387343Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=kube-secret-syncer, env=production, namespace=kube-system, pod=kube-secret-syncer-controller-76895d7f6c-mwk7b" t=2024-05-29T13:44:13.361280421Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx3hrgkg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361204626Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.361138218Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=kube-secret-syncer, env=production, namespace=kube-system, pod=kube-secret-syncer-controller-76895d7f6c-mwk7b" t=2024-05-29T13:44:13.3612552Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jx0h3d3t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.361030865Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwzdnwq7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360831993Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwkygxrt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360770692Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwkygxrt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360740932Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwhjrkoj-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.36054224Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwhjrkoj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360489899Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwhjrkoj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360480589Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwhixvu0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360429158Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwhixvu0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360401358Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=kube-controller-manager, env=production, namespace=kube-system, pod=kube-controller-manager-us-cold-denton-building-1" t=2024-05-29T13:44:13.360783388Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwhixvu0-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360359968Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.360645091Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.360240501Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=312340 slug=lakefs t=2024-05-29T13:44:13.360255478Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-manual-spz-7n7j2" t=2024-05-29T13:44:13.360246047Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwfv3q2k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360179876Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwfv3q2k-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360152916Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jwfq7xay-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.360109795Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-manual-j6h-xgcqn" t=2024-05-29T13:44:13.360142777Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.359935082Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jw7iodtw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359736511Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvw3p0ys-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359659811Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-28615680-ll7t2" t=2024-05-29T13:44:13.35971294Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=darkwing, env=production, namespace=darkwing, pod=darkwing-585bdb9fd6-qhtgx" t=2024-05-29T13:44:13.359630076Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvw3p0ys-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359546749Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.359490877Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvras8vd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359479519Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvras8vd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359451058Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvras8vd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359389548Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.359192491Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=coredns, env=production, namespace=kube-system, pod=coredns-77c7b7d9b-czgmw" t=2024-05-29T13:44:13.359353604Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvo22u1w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359223226Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvo22u1w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359134515Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.359147917Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvbdy7x2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.359071545Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=cloud-commander, env=production, namespace=atlas-agent, pod=cloud-commander-dfc5b8c7-xx96t" t=2024-05-29T13:44:13.359137589Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager.persist user=245291 slug=pismo t=2024-05-29T13:44:13.359015186Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jvar1x5f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.358911993Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.358847155Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=245291 slug=pismo t=2024-05-29T13:44:13.358554735Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.358732925Z caller=remote_instance_store.go:51 user=80938 slug=fispan msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=atlasclient, env=production, namespace=legacy, pod=atlasclient-6596c956b5-kz269" t=2024-05-29T13:44:13.358783229Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jv9qhvop-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.3586576Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=alarm-planner, env=production, namespace=atlas-agent, pod=alarm-planner-65678f7b4d-lgl8x" t=2024-05-29T13:44:13.358668388Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jv9qhvop-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.358564579Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=532884 slug=prodrevefi instance="datasource_uid=grafanacloud-prom, ref_id=mes_dq_check_model_create_failures" t=2024-05-29T13:44:13.358414166Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.358411657Z caller=remote_instance_store.go:51 user=327842 slug=exabeam msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=532884 slug=prodrevefi instance="datasource_uid=grafanacloud-prom, ref_id=mes_dq_check_model_create_failures" t=2024-05-29T13:44:13.358405266Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=us-cold-denton-building-1, container=alarm-evaluator, env=production, namespace=atlas-agent, 
pod=alarm-evaluator-7bbdd45645-fjdhw" t=2024-05-29T13:44:13.358453388Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=532884 slug=prodrevefi version=5 fingerprint=4da45c1a89c31f9d attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.358304665Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=mes_dq_check_model_create_failures State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.357017843s EvaluationString:}]" duration=14.254556ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jv88bmni-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.358310587Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jv53648o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.358253186Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jv53648o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.358241746Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.35810517Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=teleport, env=office, namespace=teleport-6, pod=teleport-6-5b6849f566-8vkjb" t=2024-05-29T13:44:13.3581535Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jv53648o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.358105375Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-juxxnjtq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.358032494Z level=debug msg="Setting next state" 
handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-juxxnjtq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357921003Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.357905733Z caller=remote_instance_store.go:51 user=410504 slug=nordictrustee msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-juxxnjtq-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357894422Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.357881899Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-julokceb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357854942Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.357747286Z caller=remote_instance_store.go:51 user=371756 slug=asapp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.357806521Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-julokceb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357717611Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-juh1b60u-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.35764844Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=20177 slug=paddledash instance="Component=notification-service, SLI=DeliveryLatency" t=2024-05-29T13:44:13.357651715Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-juh1b60u-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357511969Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jufu7vut-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357469288Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-7bc6767489-x6swt" t=2024-05-29T13:44:13.357465678Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jufu7vut-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357398697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jufu7vut-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357332697Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-judot7nk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357262746Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.357196718Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.357239668Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.35714718Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-judot7nk-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.357116254Z level=debug msg="Keeping state" state=Normal + level=debug 
ts=2024-05-29T13:44:13.356919071Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ju6yhpyn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356858022Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=432323 slug=lithic instance="name=stats.timers.ecs-prod-live.services.queues.api.webhook_ledger.e2e_latency.count A" t=2024-05-29T13:44:13.356763565Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ju6uwra5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356766161Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ju6uwra5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356729211Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.356696879Z caller=remote_instance_store.go:51 user=265585 slug=engageli msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=265585 slug=engageli t=2024-05-29T13:44:13.356657613Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ju6uwra5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.35665268Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.356543516Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=265585 slug=engageli t=2024-05-29T13:44:13.356605527Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ju3atlwu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356558799Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ju3atlwu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356495258Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.35636269Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ju3atlwu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356453938Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=633335 slug=promqlworkshop instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.356357787Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=633335 slug=promqlworkshop t=2024-05-29T13:44:13.356325895Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtwtcue2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356274816Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=reflector, env=office, namespace=kube-system, pod=reflector-5cb5865b8f-9kfv9" t=2024-05-29T13:44:13.356271914Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtwtcue2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356207555Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=postgresql-migrator, env=office, namespace=crossnokaye, pod=postgresql-migrations-799bbfcdb7-hlhrn" t=2024-05-29T13:44:13.356185297Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jto7mrvy-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.356162895Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.356050008Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=opentelemetry-collector, env=office, namespace=open-telemetry, pod=collector-opentelemetry-collector-agent-nm2lx" t=2024-05-29T13:44:13.356037223Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtmez71v-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355949853Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=network-monitor, env=office, namespace=agent-tools, pod=network-monitor-ff744bcdf-6xnlg" t=2024-05-29T13:44:13.355953395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtmez71v-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355835651Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.355785397Z caller=remote_instance_store.go:51 user=171897 slug=croesus msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtmez71v-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355777431Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=manager, env=office, namespace=flux-system, pod=source-controller-557867c58b-v7xw5" t=2024-05-29T13:44:13.355772151Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtl4tdnp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.35568595Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, 
job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtl4tdnp-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355620779Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtl4tdnp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355591799Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtl4tdnp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355566409Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtii3ziq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355538928Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=manager, env=office, namespace=flux-system, pod=kustomize-controller-75bb97945d-qvr4s" t=2024-05-29T13:44:13.355586901Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtii3ziq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355485908Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtii3ziq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355454627Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.355370887Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.355419679Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.355389513Z caller=remote_instance_store.go:51 user=459086 
slug=metricgamingprd msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=173730 slug=nikon instance= t=2024-05-29T13:44:13.355282407Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=173730 slug=nikon t=2024-05-29T13:44:13.355251446Z level=warn msg="Rule declares one or many reserved labels. Those rules labels will be ignored" labels="alertname=PreOrderStockLevelReportCronJob alert" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jtbqhe70-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355271086Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=183214 slug=vectorizedio t=2024-05-29T13:44:13.355299828Z level=debug msg="Saving alert states" count=20 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=manager, env=office, namespace=flux-system, pod=helm-controller-75968bdc68-92ngq" t=2024-05-29T13:44:13.355265913Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-2, serverless_id=pro-eu-central-1" t=2024-05-29T13:44:13.35527464Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.355211035Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.355144704Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-1, serverless_id=pro-eu-central-1" t=2024-05-29T13:44:13.35521123Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt9abdza-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355122014Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.355100139Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt9abdza-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.355107794Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=kube-state-metrics, env=office, namespace=kube-system, pod=kube-state-metrics-7877f95489-6xrtr" 
t=2024-05-29T13:44:13.355081199Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vq0kjtj7e2s7g8n0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1" t=2024-05-29T13:44:13.35505417Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.35502508Z caller=remote_instance_store.go:51 user=93046 slug=nese msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt91tv04-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354953682Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt91tv04-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354925112Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt91tv04-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354834661Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=kube-scheduler, env=office, namespace=kube-system, pod=kube-scheduler-tyson-pottsvilledc-dc8ae6e5-a142120b" t=2024-05-29T13:44:13.354862904Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.354830776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=114492 slug=railsbank instance= t=2024-05-29T13:44:13.354820357Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + level=debug ts=2024-05-29T13:44:13.354656403Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=kube-proxy, env=office, namespace=kube-system, pod=kube-proxy-48cjp" t=2024-05-29T13:44:13.354777573Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt5vr0dy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.3547067Z level=debug 
msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt5vr0dy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354609809Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vhokjtj7e2s7g8k0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354668896Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vhokjtj7e2s7g8k0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354652124Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.354601869Z caller=remote_instance_store.go:51 user=686798 slug=miceuprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-2, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354574694Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-2, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354561082Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jt5vr0dy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354571618Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=kube-apiserver, env=office, namespace=kube-system, pod=kube-apiserver-tyson-pottsvilledc-dc8ae6e5-a142120b" t=2024-05-29T13:44:13.354535354Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jsswfznm-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354441677Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-0, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354413225Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jsmchq0i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, 
service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354306926Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cno2vg8kjtj7e2s7g8ig, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354335563Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=196413 slug=form3production t=2024-05-29T13:44:13.354184222Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.698022ms + level=debug ts=2024-05-29T13:44:13.354244609Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.354256475Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.354252447Z caller=remote_instance_store.go:51 user=845543 slug=deliveryhero msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jsmchq0i-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354193175Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cihcamco6oo60d8405d0, pod=proxy-2, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354244345Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616400-bc8qx" t=2024-05-29T13:44:13.354243318Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cihcamco6oo60d8405d0, pod=proxy-1, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354163711Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.354139951Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.354038783Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616040-wfwm7" t=2024-05-29T13:44:13.354150213Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jsj0jhdf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354118964Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cihcamco6oo60d8405d0, pod=proxy-0, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.354096737Z level=debug msg="Keeping state" 
state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jsj0jhdf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354082053Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jsj0jhdf-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.354066923Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28615680-4hhkg" t=2024-05-29T13:44:13.354069633Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28615680-4hhkg" t=2024-05-29T13:44:13.354054943Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.354015161Z caller=remote_instance_store.go:51 user=250150 slug=bizagi msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.353982988Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=darkwing, env=office, namespace=darkwing, pod=darkwing-68c47d9595-6lqb5" t=2024-05-29T13:44:13.353958525Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.353933604Z level=debug msg="State manager processing evaluation results" resultCount=1 + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cihcamco6oo60d8405d0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1" t=2024-05-29T13:44:13.35395026Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jrydzqfz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.353831801Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cih9b8so6oo60d8402g0, pod=proxy-2, serverless_id=pro-eu-central-1" t=2024-05-29T13:44:13.353849655Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-zllt8" t=2024-05-29T13:44:13.353857809Z level=debug msg="Setting next 
state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.35374736Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-vlh25" t=2024-05-29T13:44:13.353763242Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.353668165Z caller=remote_instance_store.go:51 user=137351 slug=pinnacle21 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cih9b8so6oo60d8402g0, pod=proxy-0, serverless_id=pro-eu-central-1" t=2024-05-29T13:44:13.353672956Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jrwbmmhk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.353566888Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=183214 slug=vectorizedio instance="cell=cih9b8so6oo60d8402g0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1" t=2024-05-29T13:44:13.353587095Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.353558267Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=183214 slug=vectorizedio version=4 fingerprint=518ea05eef53768f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.353231782Z level=debug msg="Alert rule evaluated" results="[{Instance:cell=cih9b8so6oo60d8402g0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cih9b8so6oo60d8402g0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1 Value:0xc03a0105f0} RES:{Var:RES Labels:cell=cih9b8so6oo60d8402g0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1 Value:0xc03a010628}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352444596s EvaluationString:[ var='A' labels={cell=cih9b8so6oo60d8402g0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cih9b8so6oo60d8402g0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1} value=0 ]} {Instance:cell=cih9b8so6oo60d8402g0, pod=proxy-0, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cih9b8so6oo60d8402g0, pod=proxy-0, serverless_id=pro-eu-central-1 Value:0xc03a0106e8} RES:{Var:RES Labels:cell=cih9b8so6oo60d8402g0, pod=proxy-0, serverless_id=pro-eu-central-1 Value:0xc03a0106d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352457601s EvaluationString:[ var='A' labels={cell=cih9b8so6oo60d8402g0, pod=proxy-0, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cih9b8so6oo60d8402g0, pod=proxy-0, serverless_id=pro-eu-central-1} value=0 ]} {Instance:cell=cih9b8so6oo60d8402g0, pod=proxy-1, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cih9b8so6oo60d8402g0, pod=proxy-1, serverless_id=pro-eu-central-1 Value:0xc03a010798} RES:{Var:RES 
Labels:cell=cih9b8so6oo60d8402g0, pod=proxy-1, serverless_id=pro-eu-central-1 Value:0xc03a0107c8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352463862s EvaluationString:[ var='A' labels={cell=cih9b8so6oo60d8402g0, pod=proxy-1, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cih9b8so6oo60d8402g0, pod=proxy-1, serverless_id=pro-eu-central-1} value=0 ]} {Instance:cell=cih9b8so6oo60d8402g0, pod=proxy-2, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cih9b8so6oo60d8402g0, pod=proxy-2, serverless_id=pro-eu-central-1 Value:0xc03a010858} RES:{Var:RES Labels:cell=cih9b8so6oo60d8402g0, pod=proxy-2, serverless_id=pro-eu-central-1 Value:0xc03a0108d8}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.35246862s EvaluationString:[ var='A' labels={cell=cih9b8so6oo60d8402g0, pod=proxy-2, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cih9b8so6oo60d8402g0, pod=proxy-2, serverless_id=pro-eu-central-1} value=0 ]} {Instance:cell=cihcamco6oo60d8405d0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cihcamco6oo60d8405d0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 Value:0xc03a010918} RES:{Var:RES Labels:cell=cihcamco6oo60d8405d0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 Value:0xc03a010990}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352474286s EvaluationString:[ var='A' labels={cell=cihcamco6oo60d8405d0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cihcamco6oo60d8405d0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cihcamco6oo60d8405d0, pod=proxy-0, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cihcamco6oo60d8405d0, pod=proxy-0, serverless_id=pro-us-east-1 Value:0xc03a010a20} RES:{Var:RES Labels:cell=cihcamco6oo60d8405d0, pod=proxy-0, serverless_id=pro-us-east-1 Value:0xc03a010a90}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.35247963s EvaluationString:[ var='A' labels={cell=cihcamco6oo60d8405d0, pod=proxy-0, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cihcamco6oo60d8405d0, pod=proxy-0, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cihcamco6oo60d8405d0, pod=proxy-1, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cihcamco6oo60d8405d0, pod=proxy-1, serverless_id=pro-us-east-1 Value:0xc03a010b70} RES:{Var:RES Labels:cell=cihcamco6oo60d8405d0, pod=proxy-1, serverless_id=pro-us-east-1 Value:0xc03a010bb0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.3524843s EvaluationString:[ var='A' labels={cell=cihcamco6oo60d8405d0, pod=proxy-1, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cihcamco6oo60d8405d0, pod=proxy-1, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cihcamco6oo60d8405d0, pod=proxy-2, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cihcamco6oo60d8405d0, pod=proxy-2, serverless_id=pro-us-east-1 Value:0xc03a010c70} RES:{Var:RES Labels:cell=cihcamco6oo60d8405d0, pod=proxy-2, serverless_id=pro-us-east-1 Value:0xc03a010cc0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352488952s EvaluationString:[ var='A' labels={cell=cihcamco6oo60d8405d0, pod=proxy-2, serverless_id=pro-us-east-1} value=1 ], [ var='RES' 
labels={cell=cihcamco6oo60d8405d0, pod=proxy-2, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vg8kjtj7e2s7g8ig, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 Value:0xc03a010d40} RES:{Var:RES Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 Value:0xc03a010d58}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352493231s EvaluationString:[ var='A' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-0, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-0, serverless_id=pro-us-east-1 Value:0xc03a010dd8} RES:{Var:RES Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-0, serverless_id=pro-us-east-1 Value:0xc03a010e28}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352499677s EvaluationString:[ var='A' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-0, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-0, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-1, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-1, serverless_id=pro-us-east-1 Value:0xc03a011028} RES:{Var:RES Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-1, serverless_id=pro-us-east-1 Value:0xc03a011010}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352506436s EvaluationString:[ var='A' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-1, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-1, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-2, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-2, serverless_id=pro-us-east-1 Value:0xc03a0110a8} RES:{Var:RES Labels:cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-2, serverless_id=pro-us-east-1 Value:0xc03a011108}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352512308s EvaluationString:[ var='A' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-2, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vg8kjtj7e2s7g8ig, pod=proxy-2, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vhokjtj7e2s7g8k0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vhokjtj7e2s7g8k0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 Value:0xc03a011178} RES:{Var:RES Labels:cell=cno2vhokjtj7e2s7g8k0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1 Value:0xc03a0111f0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352516687s EvaluationString:[ var='A' labels={cell=cno2vhokjtj7e2s7g8k0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vhokjtj7e2s7g8k0, pod=adjuster-7bff45fd69-xmtgv, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-0, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A 
Labels:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-0, serverless_id=pro-us-east-1 Value:0xc03a011270} RES:{Var:RES Labels:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-0, serverless_id=pro-us-east-1 Value:0xc03a0112d0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352521447s EvaluationString:[ var='A' labels={cell=cno2vhokjtj7e2s7g8k0, pod=proxy-0, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vhokjtj7e2s7g8k0, pod=proxy-0, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-1, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-1, serverless_id=pro-us-east-1 Value:0xc03a011360} RES:{Var:RES Labels:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-1, serverless_id=pro-us-east-1 Value:0xc03a0113a0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352525893s EvaluationString:[ var='A' labels={cell=cno2vhokjtj7e2s7g8k0, pod=proxy-1, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vhokjtj7e2s7g8k0, pod=proxy-1, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-2, serverless_id=pro-us-east-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-2, serverless_id=pro-us-east-1 Value:0xc03a011438} RES:{Var:RES Labels:cell=cno2vhokjtj7e2s7g8k0, pod=proxy-2, serverless_id=pro-us-east-1 Value:0xc03a011488}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352530134s EvaluationString:[ var='A' labels={cell=cno2vhokjtj7e2s7g8k0, pod=proxy-2, serverless_id=pro-us-east-1} value=1 ], [ var='RES' labels={cell=cno2vhokjtj7e2s7g8k0, pod=proxy-2, serverless_id=pro-us-east-1} value=0 ]} {Instance:cell=cno2vq0kjtj7e2s7g8n0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1 Value:0xc03a011550} RES:{Var:RES Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1 Value:0xc03a011510}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352534296s EvaluationString:[ var='A' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=adjuster-786f87495c-jlfcx, serverless_id=pro-eu-central-1} value=0 ]} {Instance:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-0, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-0, serverless_id=pro-eu-central-1 Value:0xc03a0115e0} RES:{Var:RES Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-0, serverless_id=pro-eu-central-1 Value:0xc03a011630}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352539517s EvaluationString:[ var='A' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-0, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-0, serverless_id=pro-eu-central-1} value=0 ]} {Instance:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-1, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-1, serverless_id=pro-eu-central-1 Value:0xc03a0116c0} RES:{Var:RES Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-1, serverless_id=pro-eu-central-1 Value:0xc03a011700}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352544276s EvaluationString:[ 
var='A' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-1, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-1, serverless_id=pro-eu-central-1} value=0 ]} {Instance:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-2, serverless_id=pro-eu-central-1 State:Normal Error: Results:map[] Values:map[A:{Var:A Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-2, serverless_id=pro-eu-central-1 Value:0xc03a0117f0} RES:{Var:RES Labels:cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-2, serverless_id=pro-eu-central-1 Value:0xc03a011890}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.352550094s EvaluationString:[ var='A' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-2, serverless_id=pro-eu-central-1} value=1 ], [ var='RES' labels={cell=cno2vq0kjtj7e2s7g8n0, pod=proxy-2, serverless_id=pro-eu-central-1} value=0 ]}]" duration=37.236364ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=cloud-commander, env=office, namespace=atlas-agent, pod=cloud-commander-6d7fd4cb89-wbtgf" t=2024-05-29T13:44:13.353537418Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=480731 slug=brightmove t=2024-05-29T13:44:13.353490258Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=480731 slug=brightmove instance= t=2024-05-29T13:44:13.353470605Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=cilium-operator, env=office, namespace=kube-system, pod=cilium-operator-77bb5b96c8-zdzkn" t=2024-05-29T13:44:13.353438211Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.353372242Z caller=remote_instance_store.go:51 user=112387 slug=lucidhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=cilium-operator, env=office, namespace=kube-system, pod=cilium-operator-77bb5b96c8-zdzkn" t=2024-05-29T13:44:13.353425271Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.353365131Z caller=remote_instance_store.go:51 user=109452 slug=deltarisk msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=480731 slug=brightmove version=19 fingerprint=80250507bf7a6585 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.353360724Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.353102492s EvaluationString:}]" duration=118.7476ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jru80pvx-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.353253405Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jru80pvx-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.353212234Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jru80pvx-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.353191354Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=atlasclient, env=office, namespace=legacy, pod=atlasclient-6d88864fb7-lxpsd" t=2024-05-29T13:44:13.353241531Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.353105379Z caller=remote_instance_store.go:51 user=636704 slug=nmartin2 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=atlasclient, env=office, namespace=legacy, pod=atlasclient-6d88864fb7-lxpsd" t=2024-05-29T13:44:13.353227497Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.353099689Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=alarm-planner, env=office, namespace=atlas-agent, pod=alarm-planner-757fb5b7b5-gwfgs" t=2024-05-29T13:44:13.353141684Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=alarm-planner, env=office, namespace=atlas-agent, pod=alarm-planner-757fb5b7b5-gwfgs" t=2024-05-29T13:44:13.353110966Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.353013945Z caller=remote_instance_store.go:51 user=554491 slug=safeskyindustries msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jrrhiody-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.352876731Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=agent, env=office, namespace=monitoring, pod=grafana-agent-swdlt" t=2024-05-29T13:44:13.352824772Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsvilledc, container=agent, env=office, namespace=monitoring, pod=grafana-agent-swdlt" t=2024-05-29T13:44:13.352814202Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.352648655Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance" + level=info ts=2024-05-29T13:44:13.352619417Z caller=remote_alert_sender.go:94 user=35611 slug=play 
host=play-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.45.16:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=VbXx4uNnz alerts=1 + level=info ts=2024-05-29T13:44:13.35261507Z caller=remote_alert_sender.go:94 user=35611 slug=play host=play-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.161.168:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=VbXx4uNnz alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jrgv73uj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.352469607Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jr6hrfi2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.352389376Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jr6hrfi2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.352323995Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jr6hrfi2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.352293065Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jr6hrfi2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.352235434Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jr3hj7tr-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.352192534Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=87052 slug=polystream t=2024-05-29T13:44:13.352360281Z 
level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=15.219457ms + level=debug ts=2024-05-29T13:44:13.352372373Z caller=remote_instance_store.go:51 user=112732 slug=gleamer msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:13.352372361Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:13.352366684Z level=debug msg="Execution error state is Normal" handler=resultNormal previous_handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jqyfozon-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351901251Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=c455c1c78e34d2df attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.352294322Z level=error msg="Failed to evaluate rule" error="failed to build query 'A': data source not found" duration=7.627989ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jqhyogam-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351856331Z level=debug msg="Setting next state" handler=resultNormal + level=error ts=2024-05-29T13:44:13.352244848Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'A': data source not found" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=reloader-reloader, env=production, namespace=kube-system, pod=reloader-reloader-fbdf5d78-brxx5" t=2024-05-29T13:44:13.352291518Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jqgidnvc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351694009Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-668c6d9687-cvg9r" t=2024-05-29T13:44:13.352138897Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=postgresql-migrator, env=production, namespace=crossnokaye, pod=postgresql-migrations-668c6d9687-cvg9r" t=2024-05-29T13:44:13.352128407Z level=debug 
msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=260796 slug=expressvpn t=2024-05-29T13:44:13.352057067Z level=debug msg="Saving alert states" count=30 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=postgresql, env=production, namespace=crossnokaye, pod=postgresql-postgresql-0" t=2024-05-29T13:44:13.352042704Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - Salt Lake City" t=2024-05-29T13:44:13.352020355Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.351946739Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.351855979Z caller=remote_instance_store.go:51 user=821294 slug=bcpdesa msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.351608503Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=manager, env=production, namespace=flux-system, pod=notification-controller-5db88868fb-ch669" t=2024-05-29T13:44:13.351595687Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=manager, env=production, namespace=flux-system, pod=notification-controller-5db88868fb-ch669" t=2024-05-29T13:44:13.351585226Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jqgidnvc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351595768Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=USA - Albuquerque" t=2024-05-29T13:44:13.351511221Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=manager, env=production, namespace=flux-system, pod=kustomize-controller-5f49cb697d-hvt74" t=2024-05-29T13:44:13.351504061Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jqbplzek-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351346155Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.351297837Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq7l7cn3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351273695Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq7l7cn3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351231024Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=kube-state-metrics, env=production, namespace=kube-system, pod=kube-state-metrics-774ff95cb5-nkmbw" t=2024-05-29T13:44:13.351191958Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq7l7cn3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351153493Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq7l7cn3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351117773Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=70430 slug=dapperlabs t=2024-05-29T13:44:13.351087276Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq4lz4op-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.351033962Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=70430 slug=dapperlabs instance="datasource_uid=000000002, ref_id=A" t=2024-05-29T13:44:13.351043371Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Switzerland - 2" t=2024-05-29T13:44:13.351011285Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq4lz4op-termination-metadata-pv, phase=Failed, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350967711Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq17cwja-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.35083829Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.350869964Z caller=remote_instance_store.go:51 user=129335 slug=neomantra msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq17cwja-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350740829Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq11g6d8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350611358Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq11g6d8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350486206Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jq11g6d8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350471536Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jpu7soya-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350430806Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jpu7soya-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350406096Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jpu7soya-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.350361995Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.350322371Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.350190515Z caller=remote_instance_store.go:51 user=464933 slug=piadina msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.350164346Z caller=remote_instance_store.go:51 user=458064 slug=swptest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.350121377Z caller=remote_image_capturer.go:54 user=129335 slug=neomantra rule_org_id=1 rule_uid=mp5iy3Onk dashboard=4YKa-7Z7k panel=2 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=129335 slug=neomantra instance= t=2024-05-29T13:44:13.349970212Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jpfwcpd7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34983894Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=kube-eventrouter, env=production, namespace=kube-system, pod=eventrouter-69b9bbdf47-2gkwk" t=2024-05-29T13:44:13.349973197Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=kube-eventrouter, env=production, namespace=kube-system, pod=eventrouter-69b9bbdf47-2gkwk" t=2024-05-29T13:44:13.349959037Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jpfwcpd7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349760969Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.349786289Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.349740706Z caller=remote_instance_store.go:51 user=297794 slug=leanix 
msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.34982175Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.349798105Z caller=remote_instance_store.go:51 user=417450 slug=legitsecurity msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.349728377Z caller=remote_instance_store.go:51 user=550657 slug=garrigues msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.349678725Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=kube-apiserver, env=production, namespace=kube-system, pod=kube-apiserver-tyson-pottsville-dc-building-1" t=2024-05-29T13:44:13.349764157Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jpfwcpd7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349719359Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=417450 slug=legitsecurity instance= t=2024-05-29T13:44:13.349732863Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jpfwcpd7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349692328Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.349668856Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=kube-apiserver, env=production, namespace=kube-system, pod=kube-apiserver-tyson-pottsville-dc-building-1" t=2024-05-29T13:44:13.34973083Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joyymbq3-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349613417Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.349680071Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.349602817Z caller=remote_instance_store.go:51 user=633501 slug=y2engineering msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-joyymbq3-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349573517Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Singapore - CBD" t=2024-05-29T13:44:13.34965539Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=heartbeat, env=production, namespace=atlas-agent, pod=heartbeat-86f6cf6b84-8m4x7" t=2024-05-29T13:44:13.34964417Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Poland" t=2024-05-29T13:44:13.349513273Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.349492365Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=251760 slug=forgerock t=2024-05-29T13:44:13.349435681Z level=debug msg="Saving alert states" count=78 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.349414776Z caller=remote_instance_store.go:51 user=308298 slug=xbto msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=valvolinegp-usc1, name=instance-0000000006" t=2024-05-29T13:44:13.349399984Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.349356871Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-28616400-jzmb4" t=2024-05-29T13:44:13.349423452Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joyug3s2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349389685Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-28616400-jzmb4" t=2024-05-29T13:44:13.349394661Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joyug3s2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349349395Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joyug3s2-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349321334Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Norway" t=2024-05-29T13:44:13.349351824Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joyug3s2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349282564Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joqg7vgz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349228323Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=New Zealand" t=2024-05-29T13:44:13.349224293Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=trucksl2l-use4, name=instance-0000000003" t=2024-05-29T13:44:13.349198332Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=ecr-renew, env=production, namespace=kube-system, pod=ecr-login-renew-28615680-nss2x" t=2024-05-29T13:44:13.349193872Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joqg7vgz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349157493Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joqg7vgz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349128792Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jomdvurc-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.349014901Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=darkwing, env=production, namespace=darkwing, pod=darkwing-5bc56fc999-vms27" t=2024-05-29T13:44:13.349102921Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=trucksl2l-use4, name=instance-0000000001" t=2024-05-29T13:44:13.348995001Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=306551 slug=teckresourcesalerts t=2024-05-29T13:44:13.349082327Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jomdvurc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348846159Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=306551 slug=teckresourcesalerts instance="datasource_uid=KUQSvJx7z, ref_id=A,B" t=2024-05-29T13:44:13.349065371Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joe7mpt1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348799909Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-joe7mpt1-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348732528Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.348854871Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.295167ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jo9y9m6w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348635277Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jo9y9m6w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348570237Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=trowe-use4, name=instance-0000000000" t=2024-05-29T13:44:13.34883541Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jo9y9m6w-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348532066Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=thr-usc1, name=instance-0000000001" t=2024-05-29T13:44:13.348777131Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jnz07riu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348474156Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=thr-usc1, name=instance-0000000001" t=2024-05-29T13:44:13.348766341Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Mexico" t=2024-05-29T13:44:13.348785127Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=thr-usc1, name=instance-0000000000" t=2024-05-29T13:44:13.348698886Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=cloud-commander, env=production, namespace=atlas-agent, pod=cloud-commander-7c655654db-p442s" t=2024-05-29T13:44:13.348640404Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=cilium-operator, env=production, namespace=kube-system, pod=cilium-operator-77bb5b96c8-gkth2" t=2024-05-29T13:44:13.348542757Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=teluscustomer-nane1, name=instance-0000000006" t=2024-05-29T13:44:13.348475864Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.348399129Z caller=remote_instance_store.go:51 user=389168 slug=porhamis msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=cilium-agent, env=production, namespace=kube-system, pod=cilium-9n8qj" t=2024-05-29T13:44:13.348434362Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=teluscustomer-nane1, name=instance-0000000004" t=2024-05-29T13:44:13.348403004Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=tap-euw2, name=instance-0000000001" t=2024-05-29T13:44:13.348330669Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.348190307Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=tap-euw2, name=instance-0000000001" t=2024-05-29T13:44:13.348318828Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=tap-euw2, name=instance-0000000000" t=2024-05-29T13:44:13.3482473Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.348187491Z caller=remote_instance_store.go:51 user=340882 slug=hopstack msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=sydneyairport-au, name=instance-0000000001" t=2024-05-29T13:44:13.348186894Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jnw6x5xb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.348120312Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.347934551Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jnw06g34-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34788563Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.347902149Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jnpedz1t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.347842909Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=alarm-persister, env=production, namespace=atlas-agent, pod=alarm-persister-7f965f4d7f-v5646" t=2024-05-29T13:44:13.347873844Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.scheduler user=158536 slug=clearsaleantifraude version=16 fingerprint=024dbf6fa5d35569 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.347713037Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=grafanacloud-prom, ref_id=orchestrator_engine_p95 State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.347382473s EvaluationString:}]" duration=11.842146ms
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=santascarolina-use1, name=instance-0000000000" t=2024-05-29T13:44:13.347723369Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=ryder-use1, name=instance-0000000009" t=2024-05-29T13:44:13.347646614Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=ryder-use1, name=instance-0000000008" t=2024-05-29T13:44:13.347588224Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=ryder-use1, name=instance-0000000008" t=2024-05-29T13:44:13.347577491Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.347460679Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jnp9s2ie-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.347446035Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=rccl-use1, name=instance-0000000005" t=2024-05-29T13:44:13.347443781Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=rccl-use1, name=instance-0000000005" t=2024-05-29T13:44:13.347433898Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jnlgigys-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.347340064Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=qrgus-use4, name=instance-0000000001" t=2024-05-29T13:44:13.347373503Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=qrgus-use4, name=instance-0000000001" t=2024-05-29T13:44:13.347362967Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=qrgus-use4, name=instance-0000000000" t=2024-05-29T13:44:13.347304406Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=alarm-evaluator, env=production, namespace=atlas-agent, pod=alarm-evaluator-d55dbb797-hg272" t=2024-05-29T13:44:13.347281188Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=alarm-evaluator, env=production, namespace=atlas-agent, pod=alarm-evaluator-d55dbb797-hg272" t=2024-05-29T13:44:13.347268971Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=tyson-pottsville-dc-building-1, container=agent, env=production, namespace=monitoring, pod=grafana-agent-7hws2" t=2024-05-29T13:44:13.347177819Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=teleport, env=office, namespace=teleport-6, pod=teleport-6-668bd456c7-lkhvp" t=2024-05-29T13:44:13.34709736Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=teleport, env=office, namespace=teleport-6, pod=teleport-6-668bd456c7-lkhvp" t=2024-05-29T13:44:13.347084822Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=pruciamjp, name=instance-0000000005" t=2024-05-29T13:44:13.347085158Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jnhipjhr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.347016221Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=syncer, env=office, namespace=gemini-simulator, pod=syncer-864658bcf9-s7wrz" t=2024-05-29T13:44:13.34636942Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jneeqhrr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346858849Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=prucams-use4, name=instance-0000000003" t=2024-05-29T13:44:13.346868476Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=okq8-euw3, name=instance-0000000001" t=2024-05-29T13:44:13.346780988Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Germany - Nuremberg" t=2024-05-29T13:44:13.346797668Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.346662892Z caller=remote_instance_store.go:51 user=350037 slug=morpho msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=okq8-euw3, name=instance-0000000000" t=2024-05-29T13:44:13.346709425Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn8dizjt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346485375Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn8dizjt-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346470375Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=mswm-use4, name=instance-0000000006" t=2024-05-29T13:44:13.346489831Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=60199 slug=wallapop instance="datasource_uid=000000026, ref_id=B" t=2024-05-29T13:44:13.346368653Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.346459469Z caller=remote_instance_store.go:51 user=60199 slug=wallapop msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn7kk77w-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346376764Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn7kk77w-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346338274Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=France - Paris - 1" t=2024-05-29T13:44:13.34560339Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=syncer, env=office, namespace=agent-facility, pod=syncer-cd5b4dc7f-9q84w" t=2024-05-29T13:44:13.346299735Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=syncer, env=office, namespace=agent-facility, pod=syncer-cd5b4dc7f-9q84w" t=2024-05-29T13:44:13.346287885Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn69upm8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346200702Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=jpmorgan-use4, name=instance-0000000006" t=2024-05-29T13:44:13.346191068Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=jpmorgan-use4, name=instance-0000000006" t=2024-05-29T13:44:13.346178379Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn69upm8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346112821Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=jpmorgan-use4, name=instance-0000000004" t=2024-05-29T13:44:13.346116211Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn69upm8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.346067461Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.346068154Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn69upm8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34603932Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=jpmcic-euw2, name=instance-0000000006" t=2024-05-29T13:44:13.346046119Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=jpmcic-euw2, name=instance-0000000006" t=2024-05-29T13:44:13.346035921Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=strategy-evaluator, env=office, namespace=atlas-agent, pod=strategy-evaluator-75b86d4448-hbtdk" t=2024-05-29T13:44:13.345987265Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=jpmcic-euw2, name=instance-0000000003" t=2024-05-29T13:44:13.345963516Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.3459214Z caller=remote_instance_store.go:51 user=810903 slug=vespaai msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn4oibdd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345920369Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=instyul-nane1, name=instance-0000000001" t=2024-05-29T13:44:13.345875343Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:13.345865308Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce instance= t=2024-05-29T13:44:13.345852212Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=307381 slug=kambitaskforce t=2024-05-29T13:44:13.345804918Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn4nsnk9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345761688Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=instyul-nane1, name=instance-0000000000" t=2024-05-29T13:44:13.345781762Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=refresher, env=office, namespace=gemini-simulator, pod=refresher-66fdfbf959-j2kcp" t=2024-05-29T13:44:13.345701832Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn4nsnk9-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345686227Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=inst-use4, name=instance-0000000001" t=2024-05-29T13:44:13.345616469Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=geodis-euw1, name=instance-0000000003" t=2024-05-29T13:44:13.345513314Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn3knurr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345486545Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=pyrunner, env=office, namespace=gemini-simulator, pod=pyrunner-66f5c68b79-b4pxp" t=2024-05-29T13:44:13.34548402Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=geodis-euw1, name=instance-0000000001" t=2024-05-29T13:44:13.345432503Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=fortumcsas-eun1, name=instance-0000000010" t=2024-05-29T13:44:13.345333426Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn1pteex-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345407854Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn1pteex-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345378704Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.345217961Z caller=remote_instance_store.go:51 user=307381 slug=kambitaskforce msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0oofwn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345211452Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.345099089Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Denmark" t=2024-05-29T13:44:13.34518582Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=fortumcsas-eun1, name=instance-0000000006" t=2024-05-29T13:44:13.345187764Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=footlocker-use4, name=instance-0000000006" t=2024-05-29T13:44:13.345111206Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0oofwn-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.345084361Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Austria" t=2024-05-29T13:44:13.34506818Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0jl0rj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.3450318Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=footlocker-use4, name=instance-0000000003" t=2024-05-29T13:44:13.345049971Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0jl0rj-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34500785Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi t=2024-05-29T13:44:13.345047902Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=footlocker-use4, name=instance-0000000003" t=2024-05-29T13:44:13.345036708Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=776563 slug=eagleeye4els t=2024-05-29T13:44:13.344954562Z level=debug msg="Saving alert states done" count=6 max_state_save_concurrency=1 duration=84.191132ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0jl0rj-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344918459Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Australia - Woolloomooloo" t=2024-05-29T13:44:13.344923536Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0acduu-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344868158Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0acduu-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344813418Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=footlocker-euw4, name=instance-0000000005" t=2024-05-29T13:44:13.34485162Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=eid-eidusc1, name=instance-0000000003" t=2024-05-29T13:44:13.344770224Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jn0acduu-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344777268Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Australia - Sydney - 2" t=2024-05-29T13:44:13.344745525Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmwf0zn5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344650256Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.344645235Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmvs6g5l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344541475Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmvs6g5l-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344513055Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmvs6g5l-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344392404Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmtlj3zy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344231472Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmtlj3zy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344172281Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmt4fk34-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.344118721Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=260796 slug=expressvpn instance="cluster_name=Australia - Brisbane" t=2024-05-29T13:44:13.344573625Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmt4fk34-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34405814Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmt4fk34-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34403317Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.344507083Z caller=remote_instance_store.go:51 user=35611 slug=play msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmscdnvt-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343914289Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmrvbmxo-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343762327Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.344464813Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmrvbmxo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343675276Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmrvbmxo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343616176Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.344307681Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.344301535Z caller=remote_instance_store.go:51 user=475799 slug=dpdcz msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=260796 slug=expressvpn t=2024-05-29T13:44:13.344307748Z level=debug msg="State manager processing evaluation results" resultCount=30
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=cathaypacific-ase1, name=instance-0000000001" t=2024-05-29T13:44:13.344336021Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.344055795Z caller=remote_image_capturer.go:54 user=166705 slug=crossnokaye rule_org_id=1 rule_uid=bhmaeSc7k dashboard=7YbPY4jMk panel=105 msg="rendering alert image with grafana"
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=bfa-euw2, name=instance-0000000000" t=2024-05-29T13:44:13.344010083Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=network-monitor, env=office, namespace=agent-tools, pod=network-monitor-76b94fc6f6-7m2g5" t=2024-05-29T13:44:13.343798563Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=availity-use1, name=instance-0000000004" t=2024-05-29T13:44:13.343628191Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmmv33yc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343493804Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jmmv33yc-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343459914Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=metrics-server, env=office, namespace=kube-system, pod=metrics-server-8496f9869f-7z4hh" t=2024-05-29T13:44:13.343442358Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=316418 slug=workmotion instance="ClusterName=beta-ecs-cluster, ServiceName=beta-backend-currency-conversion" t=2024-05-29T13:44:13.343378622Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=amfamgroup-usc1, name=instance-0000000001" t=2024-05-29T13:44:13.343359901Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=manager, env=office, namespace=flux-system, pod=source-controller-557867c58b-z62br" t=2024-05-29T13:44:13.343326094Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.343184019Z caller=remote_instance_store.go:51 user=475170 slug=paypaplane msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jm63zli3-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343209451Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=allstatecan-nane1, name=instance-0000000006" t=2024-05-29T13:44:13.343200279Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jm1rl648-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.343161431Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=allstatecan-nane1, name=instance-0000000005" t=2024-05-29T13:44:13.343136398Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=manager, env=office, namespace=flux-system, pod=kustomize-controller-75bb97945d-wbng6" t=2024-05-29T13:44:13.343133714Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=1identity-usc1, name=instance-0000000006" t=2024-05-29T13:44:13.343060471Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=251760 slug=forgerock instance="cluster=1identity-usc1, name=instance-0000000005" t=2024-05-29T13:44:13.342987539Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=251760 slug=forgerock t=2024-05-29T13:44:13.342855282Z level=debug msg="State manager processing evaluation results" resultCount=78
+ level=debug ts=2024-05-29T13:44:13.342909739Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=manager, env=office, namespace=flux-system, pod=helm-controller-75968bdc68-dxmds" t=2024-05-29T13:44:13.342849748Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=manager, env=office, namespace=flux-system, pod=helm-controller-75968bdc68-dxmds" t=2024-05-29T13:44:13.34283802Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlxn2wqi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.342820207Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=507549 slug=coindcx version=37 fingerprint=5a22c5ecc9a249b4 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.342713967Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[Cache Errors:{Var:Cache Errors Labels: Value:0xc005964588} G:{Var:G Labels: Value:0xc0059645b0}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.342467183s EvaluationString:[ var='Cache Errors' labels={} value=0 ], [ var='G' labels={} value=0 ]}]" duration=454.687232ms
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=local-path-provisioner, env=office, namespace=local-path-storage, pod=local-path-provisioner-7fdb4745c6-bj9vn" t=2024-05-29T13:44:13.342763545Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=20177 slug=paddledash instance= t=2024-05-29T13:44:13.34267451Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlxk29sw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.342601925Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlxk29sw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.342573335Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.342500398Z caller=remote_instance_store.go:51 user=507549 slug=coindcx msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlwttpp1-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.342496114Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=kube-secret-syncer, env=office, namespace=kube-system, pod=kube-secret-syncer-controller-79ffbddd56-6hflz" t=2024-05-29T13:44:13.342514111Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlu1wwzh-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.342293632Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlu1wwzh-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.342160241Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=kube-proxy, env=office, namespace=kube-system, pod=kube-proxy-sbxxx" t=2024-05-29T13:44:13.3422959Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlpjio75-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34211988Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlpjio75-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.342038029Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlpjio75-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341994509Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.342242486Z caller=remote_instance_store.go:51 user=22410 slug=gwsys msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jllz4f1h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341635105Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jllz4f1h-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341582365Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlj00sbw-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341540444Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlj00sbw-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341463044Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlj00sbw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341420133Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jlj00sbw-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341391083Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkzuykp7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341346622Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkzuykp7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341207731Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkzuykp7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.341180201Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=kube-apiserver, env=office, namespace=kube-system, pod=kube-apiserver-office-crossnokaye-sandiego-4d42488a-7b0ec59d" t=2024-05-29T13:44:13.342065771Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkyb5aou-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.34107154Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkyb5aou-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340958688Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkqnc35g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340915128Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkqnc35g-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340892898Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkqnc35g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340837917Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkqnc35g-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340824837Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkqnc35g-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340787267Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=heartbeat, env=office, namespace=atlas-agent, pod=heartbeat-5dbdb47b47-c8wq5" t=2024-05-29T13:44:13.341978201Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=heartbeat, env=office, namespace=atlas-agent, pod=heartbeat-5dbdb47b47-c8wq5" t=2024-05-29T13:44:13.34196386Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkkyh731-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340580895Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkh97mge-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340414123Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.341796749Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkfkxfhd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.340282801Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.341680091Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkcrnih5-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.339946388Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jkcrnih5-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.339872687Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jk9p5oke-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.339834477Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.341635329Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.94481ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jk9p5oke-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.339740636Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.341559366Z caller=remote_instance_store.go:51 user=714300 slug=faino msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jk5n1z4y-termination-metadata-pv,
phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.339300561Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28615680-klwld" t=2024-05-29T13:44:13.341551804Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.scheduler user=843304 slug=ppcgroup t=2024-05-29T13:44:13.34144897Z level=debug msg="Skip rule evaluation because it is paused" + level=debug ts=2024-05-29T13:44:13.340746796Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.339914428Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=523906 slug=cyberark t=2024-05-29T13:44:13.341326247Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=523906 slug=cyberark instance="ClusterName=syslog-server-prod-JenkinsSyslogServerProdMaster, Series=syslog-server-prod-JenkinsSyslogServerProdMaster SyslogServer-prod-JenkinsSyslogServerProdMaster, ServiceName=SyslogServer-prod-JenkinsSyslogServerProdMaster" t=2024-05-29T13:44:13.341255917Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=523906 slug=cyberark t=2024-05-29T13:44:13.341127603Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.341119104Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.340862378Z caller=remote_instance_store.go:51 user=727299 slug=dellisgtechops msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.340718921Z caller=remote_image_capturer.go:54 user=166705 slug=crossnokaye rule_org_id=1 rule_uid=bhmaeSc7k dashboard=7YbPY4jMk panel=105 msg="rendering alert image with grafana" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=darkwing, env=office, namespace=darkwing, pod=darkwing-586f746c46-n8t5p" t=2024-05-29T13:44:13.340471776Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.340372496Z caller=remote_instance_store.go:51 user=426229 slug=accelbyte msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-l7tcj" t=2024-05-29T13:44:13.340342124Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-l7tcj" t=2024-05-29T13:44:13.340326363Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-8ggxv" t=2024-05-29T13:44:13.340205564Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.340061809Z caller=remote_instance_store.go:51 user=446686 slug=coinfx 
msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=config-syncer, env=office, namespace=kube-system, pod=config-syncer-5dc6b48767-5wnjx" t=2024-05-29T13:44:13.340102929Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=cloud-commander, env=office, namespace=atlas-agent, pod=cloud-commander-94c78c84d-75n4b" t=2024-05-29T13:44:13.340022558Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=cilium-operator, env=office, namespace=kube-system, pod=cilium-operator-77bb5b96c8-6mhvs" t=2024-05-29T13:44:13.339902053Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.339789002Z caller=remote_instance_store.go:51 user=476637 slug=cryptomm msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.339720556Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=344017 slug=descript t=2024-05-29T13:44:13.339726138Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=13.843396ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=alarm-persister, env=office, namespace=atlas-agent, pod=alarm-persister-77bfc659bb-cmd6m" t=2024-05-29T13:44:13.339472592Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=alarm-evaluator, env=office, namespace=atlas-agent, pod=alarm-evaluator-c747886b4-dft6n" t=2024-05-29T13:44:13.339356443Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-sandiego, container=alarm-evaluator, env=office, namespace=atlas-agent, pod=alarm-evaluator-c747886b4-dft6n" t=2024-05-29T13:44:13.339336829Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjzit6cy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.33913805Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjzit6cy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.339078869Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, 
persistentvolume=ws-jjzit6cy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.339045609Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=syncer, env=office, namespace=agent-facility, pod=syncer-85f7f4bbf4-9tqjx" t=2024-05-29T13:44:13.339022776Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjoxvpxs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.338979238Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjo7c5m5-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.338797386Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-7495fc5bb4-g7f7g" t=2024-05-29T13:44:13.338820256Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-7495fc5bb4-g7f7g" t=2024-05-29T13:44:13.3388051Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.338725551Z caller=remote_instance_store.go:51 user=537068 slug=bitvavotrading msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=strategy-evaluator, env=office, namespace=atlas-agent, pod=strategy-evaluator-58878ff6b4-g52l5" t=2024-05-29T13:44:13.338703689Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.338674757Z caller=remote_instance_store.go:51 user=245291 slug=pismo msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=537068 slug=bitvavotrading t=2024-05-29T13:44:13.33867061Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=537068 slug=bitvavotrading instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.33865244Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=537068 slug=bitvavotrading instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.33864441Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=245291 slug=pismo instance= t=2024-05-29T13:44:13.338609066Z level=debug msg="Setting next state" handler=resultNormal + 
logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=reloader-reloader, env=office, namespace=kube-system, pod=reloader-reloader-fbdf5d78-6pqs2" t=2024-05-29T13:44:13.338603379Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=reloader-reloader, env=office, namespace=kube-system, pod=reloader-reloader-fbdf5d78-6pqs2" t=2024-05-29T13:44:13.338592351Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.338538397Z caller=remote_instance_store.go:51 user=196413 slug=form3production msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjj8pn4s-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.338493123Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=reflector, env=office, namespace=kube-system, pod=reflector-5cb5865b8f-qlpl7" t=2024-05-29T13:44:13.338467069Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjj8pn4s-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.338420422Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjj8pn4s-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.338348312Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjh1gjs2-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.338286361Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=postgresql-migrator, env=office, namespace=crossnokaye, pod=postgresql-migrations-56fccfcbfd-qdg4w" t=2024-05-29T13:44:13.338362526Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=postgresql-migrator, 
env=office, namespace=crossnokaye, pod=postgresql-migrations-56fccfcbfd-qdg4w" t=2024-05-29T13:44:13.338351205Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.338173102Z caller=remote_instance_store.go:51 user=163513 slug=dialpad msg="calling SaveAlertInstance" + logger=ngalert.state.manager.persist user=163513 slug=dialpad t=2024-05-29T13:44:13.338123119Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.338086076Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jjh1gjs2-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.33815759Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=opentelemetry-collector, env=office, namespace=open-telemetry, pod=collector-opentelemetry-collector-agent-lxjcc" t=2024-05-29T13:44:13.338170527Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=163513 slug=dialpad instance="datasource_uid=grafanacloud-logs, ref_id=A" t=2024-05-29T13:44:13.338101098Z level=debug msg="Execution no data state is Normal" handler=resultNormal previous_handler=resultNoData + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj8nisci-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.338072719Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.33809388Z caller=remote_instance_store.go:51 user=412779 slug=microstrategy msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=163513 slug=dialpad t=2024-05-29T13:44:13.338079014Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.338035745Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.337999674Z caller=remote_instance_store.go:51 user=251760 slug=forgerock msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=network-monitor, env=office, namespace=agent-tools, pod=network-monitor-5c4887d584-58msh" t=2024-05-29T13:44:13.338014159Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj8nisci-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.337944347Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj8ep2cz-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337815476Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=277970 slug=teckresourcestest instance= t=2024-05-29T13:44:13.337784882Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj8ep2cz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337753066Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj69j98i-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337718385Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=177465 slug=fairtiq t=2024-05-29T13:44:13.337719762Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=8.370286ms + logger=ngalert.scheduler user=277970 slug=teckresourcestest version=2 fingerprint=54583cd29799ff3e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.337727059Z level=error msg="Failed to evaluate rule" error="failed to build query 'F': data source not found" duration=6.969805ms + level=error ts=2024-05-29T13:44:13.337696636Z caller=remote_rule_evaluator.go:110 user=277970 slug=teckresourcestest msg="remote evaluate failed" code=Code(422) err="failed to build query 'F': data source not found" + level=debug ts=2024-05-29T13:44:13.337702437Z caller=remote_instance_store.go:51 user=18335 slug=semaphore msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj69j98i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337652094Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=manager, env=office, namespace=flux-system, pod=notification-controller-6c65b7f89b-h6z8s" t=2024-05-29T13:44:13.337707156Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, 
container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj69j98i-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337622794Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.337552362Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.337365065Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=manager, env=office, namespace=flux-system, pod=kustomize-controller-75bb97945d-bqbgs" t=2024-05-29T13:44:13.337581921Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.337453558Z caller=remote_instance_store.go:51 user=543654 slug=jobcloudprogrammaticprod msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj45m0us-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337398882Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj45m0us-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337372782Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=543654 slug=jobcloudprogrammaticprod instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.337285745Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=manager, env=office, namespace=flux-system, pod=image-automation-controller-54bf875b97-q9w99" t=2024-05-29T13:44:13.337331469Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj112u37-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.33722894Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.337172627Z caller=remote_instance_store.go:51 user=531208 slug=knosc msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, 
instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jj112u37-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337153529Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jirnxjc4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.337077469Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=87052 slug=polystream t=2024-05-29T13:44:13.337135981Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=manager, env=office, namespace=flux-system, pod=helm-controller-75968bdc68-gh8zr" t=2024-05-29T13:44:13.337154219Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.33712591Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=87052 slug=polystream instance= t=2024-05-29T13:44:13.337114071Z level=warn msg="Failed to take an image" dashboard=A8VGUjvZk panel=35 error="rpc error: code = Code(422) desc = screenshots unavailable" + level=debug ts=2024-05-29T13:44:13.337090016Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.337090846Z caller=remote_instance_store.go:51 user=556147 slug=bettercloudholding msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jirnxjc4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336953187Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=local-path-provisioner, env=office, namespace=local-path-storage, pod=local-path-provisioner-7fdb4745c6-wn9fw" t=2024-05-29T13:44:13.337046001Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jikkk418-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336921947Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, 
namespace=kube-prometheus-stack, persistentvolume=ws-jikkk418-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336912407Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jikkk418-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336881457Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jikkk418-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336869446Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jikkk418-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336799226Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-state-metrics, env=office, namespace=kube-system, pod=kube-state-metrics-6456d44cb6-m6x2d" t=2024-05-29T13:44:13.336927117Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.336784293Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-secret-syncer, env=office, namespace=kube-system, pod=kube-secret-syncer-controller-76dcdd8d6d-wx626" t=2024-05-29T13:44:13.336817806Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-secret-syncer, env=office, namespace=kube-system, pod=kube-secret-syncer-controller-76dcdd8d6d-wx626" t=2024-05-29T13:44:13.336803691Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ji8emjxq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336741325Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.336628794Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling 
SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ji8emjxq-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336691075Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-scheduler, env=office, namespace=kube-system, pod=kube-scheduler-office-crossnokaye-rochelle-50e5c186-c2a999b8" t=2024-05-29T13:44:13.336720767Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-scheduler, env=office, namespace=kube-system, pod=kube-scheduler-office-crossnokaye-rochelle-50e5c186-c2a999b8" t=2024-05-29T13:44:13.336707901Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ji1njq2o-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336573233Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-proxy, env=office, namespace=kube-system, pod=kube-proxy-4dzlb" t=2024-05-29T13:44:13.336618244Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.336551837Z caller=remote_instance_store.go:51 user=439643 slug=swirldslabspreproduction msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ji1njq2o-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336504793Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-controller-manager, env=office, namespace=kube-system, pod=kube-controller-manager-office-crossnokaye-rochelle-50e5c186-c2a999b8" t=2024-05-29T13:44:13.336533453Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.336370856Z caller=remote_instance_store.go:51 user=490454 slug=fps msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-ji1njq2o-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" 
t=2024-05-29T13:44:13.336476042Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhxik8cf-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336404992Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=kube-apiserver, env=office, namespace=kube-system, pod=kube-apiserver-office-crossnokaye-rochelle-50e5c186-c2a999b8" t=2024-05-29T13:44:13.336434294Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.336284797Z caller=remote_instance_store.go:51 user=516613 slug=blackrocktp msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.336296446Z caller=remote_instance_store.go:51 user=485459 slug=heroiclabs msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=heartbeat, env=office, namespace=atlas-agent, pod=heartbeat-97f4ccfdc-rcf97" t=2024-05-29T13:44:13.336320461Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhxik8cf-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336291501Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=884866 slug=cnonumerique t=2024-05-29T13:44:13.336216032Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + level=debug ts=2024-05-29T13:44:13.336136535Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.scheduler user=884866 slug=cnonumerique version=57 fingerprint=49aa9b393c64b94f attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.33607826Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=fdhk917z41xj4a, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.33574425s EvaluationString:}]" duration=9.081497ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhsxl3tb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336170259Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616400-7j2hf" t=2024-05-29T13:44:13.336145159Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 
slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhrsadnk-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.336068978Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.336102779Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616040-gnldr" t=2024-05-29T13:44:13.336017244Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.335981079Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.335777507Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhpvw3al-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335695034Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.335544702Z caller=remote_instance_store.go:51 user=465816 slug=metricgamingqa msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-rg29q" t=2024-05-29T13:44:13.335724777Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhkxjdms-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335582963Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=108112 slug=btctrader t=2024-05-29T13:44:13.335558079Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=29.098833ms + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhkxjdms-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335541473Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.335567238Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton 
instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhkxjdms-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335513983Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.335484128Z caller=remote_instance_store.go:51 user=912534 slug=useat msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.335478Z caller=remote_instance_store.go:51 user=206107 slug=hydrolix msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhjfdwqg-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335474802Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.335460057Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhjfdwqg-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335387821Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=config-syncer, env=office, namespace=kube-system, pod=config-syncer-6c9c47f555-ppbl8" t=2024-05-29T13:44:13.335496227Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=hDPyPu04k, ref_id=A" t=2024-05-29T13:44:13.335448467Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=hDPyPu04k, ref_id=A" t=2024-05-29T13:44:13.335429132Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=430961 slug=solifi instance="datasource_uid=hDPyPu04k, ref_id=A" t=2024-05-29T13:44:13.335403351Z level=debug msg="Setting next state" handler=resultNoData + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=cloud-commander, env=office, namespace=atlas-agent, pod=cloud-commander-56dbf7c6b9-m454p" t=2024-05-29T13:44:13.335364955Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhjfdwqg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335342861Z level=debug msg="Keeping state" state=Normal + 
logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhjfdwqg-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335314881Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhj10wxy-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.3352743Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.33529301Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.335273399Z caller=remote_instance_store.go:51 user=260796 slug=expressvpn msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.335239256Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.335220653Z caller=remote_instance_store.go:51 user=459086 slug=metricgamingprd msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhj10wxy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335200949Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhj10wxy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.335162559Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=cilium-agent, env=office, namespace=kube-system, pod=cilium-h252t" t=2024-05-29T13:44:13.335171263Z level=debug msg="Setting next state" handler=resultNormal
+ level=info ts=2024-05-29T13:44:13.335030214Z caller=remote_alert_sender.go:94 user=4947 slug=mediamath host=mediamath-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.156.57:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=cdbhsq1q9dv5sc alerts=1
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhhzli2j-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.334996997Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=atlasclient, env=office, namespace=legacy, pod=atlasclient-74bb99cd95-gpdsl" t=2024-05-29T13:44:13.335068682Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=4947 slug=mediamath t=2024-05-29T13:44:13.334924247Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=33.600592ms
+ level=debug ts=2024-05-29T13:44:13.334938392Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=alarm-evaluator, env=office, namespace=atlas-agent, pod=alarm-evaluator-744d8465f6-lvm7q" t=2024-05-29T13:44:13.334753046Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-rochelle, container=agent, env=office, namespace=monitoring, pod=grafana-agent-c4dj2" t=2024-05-29T13:44:13.334652639Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager.persist user=250150 slug=bizagi t=2024-05-29T13:44:13.334554828Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=teleport, env=office, namespace=teleport-6, pod=teleport-6-6b76cd6d4f-bjqvz" t=2024-05-29T13:44:13.334554829Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhf9qipi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.334524672Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=250150 slug=bizagi instance= t=2024-05-29T13:44:13.334523674Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jhdhd8ym-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.334406911Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.scheduler user=250150 slug=bizagi version=1 fingerprint=d2e5f62ca1194ee5 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.334437573Z level=debug msg="Alert rule evaluated" results="[{Instance: State:Normal Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.334252726s EvaluationString:}]" duration=1.062032806s
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=syncer, env=office, namespace=agent-facility, pod=syncer-6fdbc657c7-9xk2w" t=2024-05-29T13:44:13.334448282Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=strategy-planner, env=office, namespace=atlas-agent, pod=strategy-planner-8479787df5-tjd97" t=2024-05-29T13:44:13.334294256Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jh29drxo-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.334170459Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-7599cb7bfd-jgz82" t=2024-05-29T13:44:13.334158674Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-7599cb7bfd-jgz82" t=2024-05-29T13:44:13.334113012Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jh29drxo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.334112078Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jh29drxo-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.334085438Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgz4lj57-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.334045178Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=strategy-evaluator, env=office, namespace=atlas-agent, pod=strategy-evaluator-68ddf9996c-mhvcd" t=2024-05-29T13:44:13.334001821Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=reloader-reloader, env=office, namespace=kube-system, pod=reloader-reloader-fbdf5d78-5jbfj" t=2024-05-29T13:44:13.333921956Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgv887e0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333840485Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgv887e0-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333808085Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=reflector, env=office, namespace=kube-system, pod=reflector-5cb5865b8f-wtnrv" t=2024-05-29T13:44:13.333807632Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgv887e0-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333738224Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=457025 slug=truta t=2024-05-29T13:44:13.333736551Z level=debug msg="State manager processing evaluation results" resultCount=2
+ logger=ngalert.state.manager.persist user=201644 slug=thoughtspot t=2024-05-29T13:44:13.333680625Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=201644 slug=thoughtspot instance= t=2024-05-29T13:44:13.333669842Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=postgresql-migrator, env=office, namespace=crossnokaye, pod=postgresql-migrations-59957d7584-754r2" t=2024-05-29T13:44:13.333689938Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgnx1f9p-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333608293Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgnx1f9p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333569713Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgnx1f9p-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333545352Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgnx1f9p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333516562Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgnx1f9p-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333493512Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jgm0x95t-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333454781Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.333392929Z caller=remote_instance_store.go:51 user=656284 slug=cencosudx msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.333336216Z caller=remote_instance_store.go:51 user=776563 slug=eagleeye4els msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=656284 slug=cencosudx instance="datasource_uid=grafanacloud-prom, ref_id=A" t=2024-05-29T13:44:13.333339198Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=metrics-server, env=office, namespace=kube-system, pod=metrics-server-8496f9869f-pmx2b" t=2024-05-29T13:44:13.333191298Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jg0du6q7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.333093558Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfye4074-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.332950066Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfye4074-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.332924936Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.332871628Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=manager, env=office, namespace=flux-system, pod=kustomize-controller-75bb97945d-zr9cw" t=2024-05-29T13:44:13.332871165Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=manager, env=office, namespace=flux-system, pod=image-reflector-controller-78c5d97ff5-7662x" t=2024-05-29T13:44:13.332786442Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.332725978Z caller=remote_instance_store.go:51 user=183214 slug=vectorizedio msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfvgbb9h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.332525892Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.332462362Z caller=remote_instance_store.go:51 user=765752 slug=octave msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfv47t72-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.33238287Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfv47t72-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.33229819Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.332365071Z caller=remote_instance_store.go:51 user=506300 slug=jostens msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=local-path-provisioner, env=office, namespace=local-path-storage, pod=local-path-provisioner-7fdb4745c6-5v9z6" t=2024-05-29T13:44:13.33236195Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=local-path-provisioner, env=office, namespace=local-path-storage, pod=local-path-provisioner-7fdb4745c6-5v9z6" t=2024-05-29T13:44:13.332349112Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfv47t72-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.332282939Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfrgwa8t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.332162128Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfrgwa8t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.332135398Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=kube-secret-syncer, env=office, namespace=kube-system, pod=kube-secret-syncer-controller-7967587dc6-zh4nd" t=2024-05-29T13:44:13.332178944Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.332115528Z caller=remote_instance_store.go:51 user=554491 slug=safeskyindustries msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.331892382Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=kube-proxy, env=office, namespace=kube-system, pod=kube-proxy-sw2bg" t=2024-05-29T13:44:13.331989117Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.331884821Z caller=remote_instance_store.go:51 user=668587 slug=brightacceptance msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=kube-controller-manager, env=office, namespace=kube-system, pod=kube-controller-manager-office-crossnokaye-riverside-4c76a509-ff90b47b" t=2024-05-29T13:44:13.331898137Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfl3na5d-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331862475Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfbmlrzs-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331822605Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=kube-apiserver, env=office, namespace=kube-system, pod=kube-apiserver-office-crossnokaye-riverside-4c76a509-ff90b47b" t=2024-05-29T13:44:13.331806771Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jfbmlrzs-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331739544Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.331613663Z caller=remote_instance_store.go:51 user=763196 slug=djtecha msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=heartbeat, env=office, namespace=atlas-agent, pod=heartbeat-57c864778f-bpk5l" t=2024-05-29T13:44:13.331724503Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.331624711Z caller=remote_instance_store.go:51 user=824492 slug=pineapples msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf4itfbi-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331574842Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.331568277Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf4itfbi-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331533192Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf4itfbi-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331462321Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=698963 slug=lemonade instance="app=cooper-platform, pod=cooper-platform-79d45494d9-qdkbm" t=2024-05-29T13:44:13.331416932Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.331385842Z caller=remote_instance_store.go:51 user=527204 slug=lnrsusinsurancenonprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616040-pssqc" t=2024-05-29T13:44:13.331415513Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=174675 slug=journalprod instance="datasource_uid=uF2hBHyGz, ref_id=A" t=2024-05-29T13:44:13.331378103Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ level=debug ts=2024-05-29T13:44:13.331192103Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf3nuqna-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331299509Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.33118492Z caller=remote_instance_store.go:51 user=297794 slug=leanix msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=696798 slug=mcv t=2024-05-29T13:44:13.331195976Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=11.436472ms
+ Error parsing panelUID for alert annotationruleID5591dashactualerrorstrconv.ParseInt: parsing "": invalid syntaxlogger=ngalert.scheduler user=174675 slug=journalprod version=1 fingerprint=866660aa8d98152e attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.331066072Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=uF2hBHyGz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.330590551s EvaluationString:}]" duration=14.589738ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf2lpndb-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331201338Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf2lpndb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331112107Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.330922139Z caller=remote_instance_store.go:51 user=391538 slug=risknarrative msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf2lpndb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.331047267Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.330981819Z caller=remote_instance_store.go:51 user=706031 slug=miceutest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-99vwp" t=2024-05-29T13:44:13.331019878Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.330894915Z caller=remote_instance_store.go:51 user=679831 slug=joveostageaws msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jf0ndrrt-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330849505Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=config-syncer, env=office, namespace=kube-system, pod=config-syncer-7658f89c6b-mrbtz" t=2024-05-29T13:44:13.330887708Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.330696072Z caller=remote_instance_store.go:51 user=325783 slug=bloxprod msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=cloud-commander, env=office, namespace=atlas-agent, pod=cloud-commander-cdcf98d56-zs6lv" t=2024-05-29T13:44:13.330799634Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jewzy8c8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330747164Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jewzy8c8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330683923Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jewzy8c8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330659543Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=cilium-agent, env=office, namespace=kube-system, pod=cilium-bk5s7" t=2024-05-29T13:44:13.330618212Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=cilium-agent, env=office, namespace=kube-system, pod=cilium-bk5s7" t=2024-05-29T13:44:13.330608357Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jesizz2f-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330526241Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=atlasclient, env=office, namespace=legacy, pod=atlasclient-dcd6d6644-42d65" t=2024-05-29T13:44:13.33050055Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=atlasclient, env=office, namespace=legacy, pod=atlasclient-dcd6d6644-42d65" t=2024-05-29T13:44:13.330486315Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jesizz2f-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.33043339Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=alarm-persister, env=office, namespace=atlas-agent, pod=alarm-persister-76c8d455cb-mqn2c" t=2024-05-29T13:44:13.330277991Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jedl5xwr-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330247169Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jedl5xwr-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330184788Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.33012681Z caller=remote_instance_store.go:51 user=449554 slug=metricgamingppe msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=alarm-evaluator, env=office, namespace=atlas-agent, pod=alarm-evaluator-7f768b97b5-w5nrw" t=2024-05-29T13:44:13.330168946Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.330058617Z caller=remote_instance_store.go:51 user=536824 slug=forgerockit msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jed3gmb6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.330107837Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-riverside, container=agent, env=office, namespace=monitoring, pod=grafana-agent-cw86x" t=2024-05-29T13:44:13.33003142Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je7twhiq-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329926445Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=syncer, env=office, namespace=agent-facility, pod=syncer-5fc9fd4c97-mpl42" t=2024-05-29T13:44:13.329850412Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je5ljgyv-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329676303Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je5ljgyv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329640502Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je5ljgyv-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329610942Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-6c5bdf8858-xc8fw" t=2024-05-29T13:44:13.32963075Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=strategy-evaluator, env=office, namespace=atlas-agent, pod=strategy-evaluator-8698bfbfc9-p47lw" t=2024-05-29T13:44:13.329530012Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je3f7u4j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329422Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je3f7u4j-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.32939668Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=reloader-reloader, env=office, namespace=kube-system, pod=reloader-reloader-fbdf5d78-rgwbx" t=2024-05-29T13:44:13.329383688Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je3f491t-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329313109Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=reloader-reloader, env=office, namespace=kube-system, pod=reloader-reloader-fbdf5d78-rgwbx" t=2024-05-29T13:44:13.329367704Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=177465 slug=fairtiq t=2024-05-29T13:44:13.329267816Z level=debug msg="State manager processing evaluation results" resultCount=1
+ logger=ngalert.scheduler user=177465 slug=fairtiq version=23 fingerprint=9ee5087cae26d035 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.329140262Z level=debug msg="Alert rule evaluated" results="[{Instance:datasource_uid=Og2xWvHnz, ref_id=A State:NoData Error: Results:map[] Values:map[] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.328625524s EvaluationString:}]" duration=47.978757ms
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je3f491t-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329246078Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je2p34gn-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329183287Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-je2p34gn-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.329096977Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=opentelemetry-collector, env=office, namespace=open-telemetry, pod=collector-opentelemetry-collector-agent-mk7km" t=2024-05-29T13:44:13.328906164Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdtwuyjz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.328808494Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.328737302Z caller=remote_instance_store.go:51 user=528849 slug=bitvavo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=network-monitor, env=office, namespace=agent-tools, pod=network-monitor-7b84dc5fcb-r8n5g" t=2024-05-29T13:44:13.328798546Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.32864026Z caller=remote_instance_store.go:51 user=788769 slug=pfde msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jds95skx-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.328512011Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=manager, env=office, namespace=flux-system, pod=source-controller-557867c58b-6kq4m" t=2024-05-29T13:44:13.328488986Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdqyhlqb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.328372689Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdojfmap-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.328200297Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdojfmap-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.328146277Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdmtd3ea-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.328101216Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdmtd3ea-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.328004595Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=manager, env=office, namespace=flux-system, pod=kustomize-controller-75bb97945d-2gf9r" t=2024-05-29T13:44:13.328277492Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdlx49tc-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.327910474Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=manager, env=office, namespace=flux-system, pod=image-reflector-controller-78c5d97ff5-7qdq4" t=2024-05-29T13:44:13.328191662Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=manager, env=office, namespace=flux-system, pod=image-automation-controller-54bf875b97-ntv4s" t=2024-05-29T13:44:13.328078108Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=manager, env=office, namespace=flux-system, pod=helm-controller-75968bdc68-7wdlp" t=2024-05-29T13:44:13.327963093Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=manager, env=office, namespace=flux-system, pod=helm-controller-75968bdc68-7wdlp" t=2024-05-29T13:44:13.327950015Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.327834833Z caller=remote_instance_store.go:51 user=612525 slug=adleyeview msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=kube-state-metrics, env=office, namespace=kube-system, pod=kube-state-metrics-64bbfdcc6-2fkr9" t=2024-05-29T13:44:13.327774154Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=kube-state-metrics, env=office, namespace=kube-system, pod=kube-state-metrics-64bbfdcc6-2fkr9" t=2024-05-29T13:44:13.327760234Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager.persist user=191103 slug=amazonadmin t=2024-05-29T13:44:13.327686321Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=kube-secret-syncer, env=office, namespace=kube-system, pod=kube-secret-syncer-controller-786f47d88d-xmb4n" t=2024-05-29T13:44:13.327672677Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.327525157Z caller=remote_instance_store.go:51 user=452115 slug=ybmetrics msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdg0hjd7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.327434609Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=kube-proxy, env=office, namespace=kube-system, pod=kube-proxy-ptgnw" t=2024-05-29T13:44:13.327363484Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdg0hjd7-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.327325318Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdf5ik38-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.327253837Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=kube-controller-manager, env=office, namespace=kube-system, pod=kube-controller-manager-office-crossnokaye-oxnard3-6329d21c-ed62c4bf" t=2024-05-29T13:44:13.32726046Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=kube-apiserver, env=office, namespace=kube-system, pod=kube-apiserver-office-crossnokaye-oxnard3-6329d21c-ed62c4bf" t=2024-05-29T13:44:13.327172823Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jdf5ik38-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.327097936Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.327031495Z caller=remote_instance_store.go:51 user=688926 slug=atriumhq msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jd83n7ng-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.327021675Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.326922681Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.326949791Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jd83n7ng-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326888844Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jd473b2b-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326848123Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616400-76n52" t=2024-05-29T13:44:13.326842181Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616040-nrs2c" t=2024-05-29T13:44:13.326755286Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=ecr-renew, env=office, namespace=kube-system, pod=ecr-login-renew-28616040-nrs2c" t=2024-05-29T13:44:13.326738107Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.326656276Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.326513824Z caller=remote_instance_store.go:51 user=325146 slug=farseer msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.326608304Z caller=remote_instance_store.go:51 user=627877 slug=cookidoo msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jd33wj7a-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326649851Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jd33wj7a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326620891Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jd33wj7a-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326598521Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcxq0cr6-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326442679Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcxq0cr6-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326369448Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=4947 slug=mediamath instance= t=2024-05-29T13:44:13.326364546Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=coredns, env=office, namespace=kube-system, pod=coredns-6d6869bf68-59t6k" t=2024-05-29T13:44:13.326279509Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.326168279Z caller=remote_instance_store.go:51 user=608555 slug=ias msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jctoul5h-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.326170696Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=cloud-commander, env=office, namespace=atlas-agent, pod=cloud-commander-6d787d7b8-qr7xb" t=2024-05-29T13:44:13.32608407Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.326044075Z caller=remote_instance_store.go:51 user=698963 slug=lemonade msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.325979821Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager.persist user=538037 slug=drivewealth t=2024-05-29T13:44:13.325973398Z level=debug msg="Saving alert states done" count=3 max_state_save_concurrency=1 duration=79.280402ms
+ level=debug ts=2024-05-29T13:44:13.325949479Z caller=remote_instance_store.go:51 user=469851 slug=yello msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=cilium-operator, env=office, namespace=kube-system, pod=cilium-operator-77bb5b96c8-nk62d" t=2024-05-29T13:44:13.325973541Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=cilium-operator, env=office, namespace=kube-system, pod=cilium-operator-77bb5b96c8-nk62d" t=2024-05-29T13:44:13.32595961Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcq89vwb-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325929084Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.325924847Z caller=remote_instance_store.go:51 user=344017 slug=descript msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcq89vwb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325895273Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.325913206Z caller=remote_instance_store.go:51 user=538037 slug=drivewealth msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcq89vwb-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325869833Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=cilium-agent, env=office, namespace=kube-system, pod=cilium-2g76d" t=2024-05-29T13:44:13.325860243Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcmllow4-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325797372Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcmllow4-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325754812Z level=debug msg="Keeping state" state=Normal
+ level=debug ts=2024-05-29T13:44:13.325747969Z caller=remote_instance_store.go:51 user=482907 slug=wavelonp msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcl22iz8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325611511Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcl22iz8-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.32557893Z level=debug msg="Setting next state" handler=resultNormal
+ level=debug ts=2024-05-29T13:44:13.325621704Z caller=remote_instance_store.go:51 user=687021 slug=heviai msg="calling SaveAlertInstance"
+ level=debug ts=2024-05-29T13:44:13.325644477Z caller=remote_instance_store.go:51 user=517562 slug=microstrategytest msg="calling SaveAlertInstance"
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=alarm-planner, env=office, namespace=atlas-agent, pod=alarm-planner-84dbb984db-7qmwd" t=2024-05-29T13:44:13.325650193Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=alarm-persister, env=office, namespace=atlas-agent, pod=alarm-persister-5ffb79bf9b-5xzb5" t=2024-05-29T13:44:13.325564518Z level=debug msg="Keeping state" state=Normal
+ logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=alarm-persister, env=office, namespace=atlas-agent, pod=alarm-persister-5ffb79bf9b-5xzb5" t=2024-05-29T13:44:13.325551467Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcl22iz8-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325504139Z level=debug msg="Setting next state" handler=resultNormal
+ logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcl22iz8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics"
t=2024-05-29T13:44:13.325461509Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcl22iz8-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325435929Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=alarm-evaluator, env=office, namespace=atlas-agent, pod=alarm-evaluator-77b46dfc97-6tqh7" t=2024-05-29T13:44:13.325467648Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=alarm-evaluator, env=office, namespace=atlas-agent, pod=alarm-evaluator-77b46dfc97-6tqh7" t=2024-05-29T13:44:13.32545419Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcft3yal-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325289017Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcft3yal-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325254417Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard3, container=agent, env=office, namespace=monitoring, pod=grafana-agent-6hh6g" t=2024-05-29T13:44:13.325338257Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=472647 slug=planet instance= t=2024-05-29T13:44:13.325252085Z level=debug msg="Setting next state" handler=resultError + logger=ngalert.state.manager user=767797 slug=mgmresorts instance="datasource_uid=d1aebc62-96b9-4d63-9239-4734a6bc96ce, ref_id=A" t=2024-05-29T13:44:13.325267058Z level=debug msg="Keeping state" state=NoData previous_ends_at=2024-05-29T13:47:10Z next_ends_at=2024-05-29T13:48:10Z + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=teleport, env=office, namespace=teleport-6, pod=teleport-6-6b87b5f78d-vv7pj" t=2024-05-29T13:44:13.325238983Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcex9ye4-termination-metadata-pv, phase=Released, 
pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325206936Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=teleport, env=office, namespace=teleport-6, pod=teleport-6-6b87b5f78d-vv7pj" t=2024-05-29T13:44:13.32522607Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcex9ye4-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.325009234Z level=debug msg="Setting next state" handler=resultNormal + level=debug ts=2024-05-29T13:44:13.325044022Z caller=remote_instance_store.go:51 user=166705 slug=crossnokaye msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=strategy-planner, env=office, namespace=atlas-agent, pod=strategy-planner-57f5c6f9c8-lcqlb" t=2024-05-29T13:44:13.325000668Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=strategy-planner, env=office, namespace=atlas-agent, pod=strategy-planner-57f5c6f9c8-lcqlb" t=2024-05-29T13:44:13.324986449Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-64555f9c6c-q9vqd" t=2024-05-29T13:44:13.324901848Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=strategy-persister, env=office, namespace=atlas-agent, pod=strategy-persister-64555f9c6c-q9vqd" t=2024-05-29T13:44:13.324889747Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jcbshv33-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.324820862Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.324822652Z caller=remote_instance_store.go:51 user=696798 slug=mcv msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.32465794Z caller=remote_instance_store.go:51 user=806229 slug=simplisafe msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jc9c7kcy-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.324677191Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=reloader-reloader, env=office, namespace=kube-system, pod=reloader-reloader-fbdf5d78-7xkxx" t=2024-05-29T13:44:13.32469106Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jc9c7kcy-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.32459563Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jc2ip3r7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.32455891Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jc2ip3r7-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.324531019Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jc2ip3r7-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.324496979Z level=debug msg="Keeping state" state=Normal + logger=ngalert.scheduler user=696798 slug=mcv version=1 fingerprint=d4d18d7d7674bf74 attempt=1 now=2024-05-29T13:44:10Z t=2024-05-29T13:44:13.324142303Z level=debug msg="Alert rule evaluated" results="[{Instance:aggregatedBy=sum, name=TOKYO Query State:Normal Error: Results:map[] Values:map[Breaches:{Var:Breaches Labels: Value:0xc07d1dc0a8} Threshold:{Var:Threshold Labels: Value:0xc07d1dc110} compare:{Var:compare Labels:aggregatedBy=sum, name=TOKYO Query Value:0xc07d1dc160} sum:{Var:sum Labels:aggregatedBy=sum, name=TOKYO Query Value:0xc07d1dc190}] EvaluatedAt:2024-05-29 13:44:10 +0000 UTC EvaluationDuration:3.323730005s EvaluationString:[ var='Breaches' labels={} value=72 ], [ var='Threshold' labels={} value=1 ], [ var='compare' labels={aggregatedBy=sum, name=TOKYO Query} value=0 ], [ var='sum' labels={aggregatedBy=sum, name=TOKYO Query} value=4 ]}]" duration=28.696593ms + logger=ngalert.state.manager.persist user=843304 slug=ppcgroup t=2024-05-29T13:44:13.324096507Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=14.694723ms + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=network-monitor, env=office, namespace=agent-tools, pod=network-monitor-775675b79f-jzphq" t=2024-05-29T13:44:13.324126487Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbmvk3un-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.324038224Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=236496 slug=improbable t=2024-05-29T13:44:13.323970873Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=12.000628ms + level=debug ts=2024-05-29T13:44:13.32387694Z caller=remote_instance_store.go:51 user=900395 slug=jcla1234 msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.323867106Z caller=remote_instance_store.go:51 user=214309 slug=spenmo msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbkaych9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323902373Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=manager, env=office, namespace=flux-system, pod=source-controller-557867c58b-qg9sq" t=2024-05-29T13:44:13.323871015Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager.persist user=87955 slug=icssmonitoring t=2024-05-29T13:44:13.323833073Z level=debug msg="Saving alert states" count=1 max_state_save_concurrency=1 + logger=ngalert.state.manager user=87955 slug=icssmonitoring instance= t=2024-05-29T13:44:13.323815981Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=87955 slug=icssmonitoring t=2024-05-29T13:44:13.323753731Z level=debug msg="State manager processing evaluation results" resultCount=1 + level=debug ts=2024-05-29T13:44:13.32369586Z caller=remote_instance_store.go:51 user=776563 slug=eagleeye4els msg="calling SaveAlertInstance" + level=debug ts=2024-05-29T13:44:13.323695223Z caller=remote_alert_sender.go:94 user=84360 slug=sib host=sib-grafana-http.hosted-grafana.svc.cluster.local.:10000 addr=10.145.245.254:10000 msg="sending alerts to grafana" rule_org_id=1 rule_uid=IAa-Qjm4k alerts=1 + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbcwu8u9-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323713141Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=manager, env=office, namespace=flux-system, pod=kustomize-controller-75bb97945d-xbx87" t=2024-05-29T13:44:13.323687355Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager.persist user=84360 slug=sib t=2024-05-29T13:44:13.323622496Z level=debug msg="Saving alert states done" count=1 max_state_save_concurrency=1 duration=34.124625ms + level=debug ts=2024-05-29T13:44:13.323550308Z caller=remote_instance_store.go:51 user=495410 slug=hedgehog msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbcwu8u9-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.32362522Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbb3cclp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323465789Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=manager, env=office, namespace=flux-system, pod=image-reflector-controller-78c5d97ff5-fwlqn" t=2024-05-29T13:44:13.323571951Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=manager, env=office, namespace=flux-system, pod=image-automation-controller-54bf875b97-84s52" t=2024-05-29T13:44:13.323481395Z level=debug msg="Keeping state" state=Normal + level=debug ts=2024-05-29T13:44:13.323307077Z caller=remote_instance_store.go:51 user=838012 slug=lepton msg="calling SaveAlertInstance" + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbb3cclp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323309657Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbal1waz-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323254346Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=manager, env=office, namespace=flux-system, pod=helm-controller-75968bdc68-h6rb5" t=2024-05-29T13:44:13.323350056Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jbal1waz-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323164135Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jb8setwp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323132775Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jb8setwp-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323108955Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jb8setwp-termination-metadata-pv, phase=Available, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.323030604Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jb8cbogd-termination-metadata-pv, phase=Released, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.322991804Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=kube-state-metrics, env=office, namespace=kube-system, pod=kube-state-metrics-7486df6f47-kf47x" t=2024-05-29T13:44:13.323134291Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=kube-state-metrics, env=office, namespace=kube-system, pod=kube-state-metrics-7486df6f47-kf47x" t=2024-05-29T13:44:13.323104593Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=kube-secret-syncer, env=office, namespace=kube-system, pod=kube-secret-syncer-controller-5f8fd699c5-fsnsm" t=2024-05-29T13:44:13.323031318Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=838012 slug=lepton instance="__name__=kube_persistentvolume_status_phase, container=kube-state-metrics, endpoint=http, instance=10.0.43.255:8080, job=kube-state-metrics, namespace=kube-prometheus-stack, persistentvolume=ws-jb8cbogd-termination-metadata-pv, phase=Failed, pod=kube-prometheus-stack-kube-state-metrics-5fb8589c46-gq85n, service=kube-prometheus-stack-kube-state-metrics" t=2024-05-29T13:44:13.322905033Z level=debug msg="Setting next state" handler=resultNormal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=kube-scheduler, env=office, namespace=kube-system, pod=kube-scheduler-office-crossnokaye-oxnard2-62f4cd34-3a0477dc" t=2024-05-29T13:44:13.322931021Z level=debug msg="Keeping state" state=Normal + logger=ngalert.state.manager user=166705 slug=crossnokaye instance="atlas_agent_name=office-crossnokaye-oxnard2, container=kube-scheduler, env=office, namespace=kube-system, pod=kube-scheduler-office-crossnokaye-oxnard2-62f4cd34-3a0477dc" t=2024-05-29T13:44:13.322919226Z level=debug msg="Setting next state" handler=resultNormal diff --git a/pkg/pattern/ingester_querier_test.go b/pkg/pattern/ingester_querier_test.go index 940d42da9926a..6c499e9ea27a0 100644 --- a/pkg/pattern/ingester_querier_test.go +++ b/pkg/pattern/ingester_querier_test.go @@ -13,7 +13,7 @@ import ( ) func Test_prunePatterns(t *testing.T) { - file, err := os.Open("testdata/patterns.txt") + file, err := os.Open(`testdata/patterns.txt`) require.NoError(t, err) defer file.Close() @@ -27,39 +27,83 @@ func Test_prunePatterns(t *testing.T) { require.NoError(t, scanner.Err()) startingPatterns =
len(resp.Series) - prunePatterns(resp, 0, newIngesterQuerierMetrics(prometheus.DefaultRegisterer, `test`)) + prunePatterns(resp, 0, newIngesterQuerierMetrics(prometheus.DefaultRegisterer, "test")) expectedPatterns := []string{ - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=0 <_>`, - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=1 <_>`, - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=2 <_>`, - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=3 <_>`, - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=4 <_>`, - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=5 <_>`, - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=6 <_>`, - `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=7 <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" <_> partitionID=0, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" <_> partitionID=7, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=0, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=1, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=2, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=3, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=3, <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=4, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=4, <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=5, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=5, <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=6, <_> +0000 UTC, <_>`, - `<_> caller=batcher.go:155 level=info msg="batcher:processing aggregation result" result="user=9960, partitionID=7, <_> +0000 UTC, <_>`, - `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=0 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=1 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> 
caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=2 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=3 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=4 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=5 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=6 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=7 <_> +0000 UTC" <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, - `<_> caller=wrapper.go:48 level=info component=distributor msg="sample remote write" eventType=bi <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=0 <_> <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=1 <_> <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=2 <_> <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=3 <_> <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=4 <_> <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=5 <_> <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=6 <_> <_>`, + `<_> caller=aggregator.go:139 level=info msg="received kafka message" topic=cortex-dev-01-aggregations partition=7 <_> <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" <_> partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" <_> partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" <_> partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> 
<_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=0, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=1, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=2, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation 
result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=3, <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=4, <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=5, <_> <_> <_> <_> <_> <_> <_> 
<_> <_> sampleTimestamp=2024-04-03 <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=6, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=batcher.go:155 level=info msg="batcher: processing aggregation result" result="user=9960, partitionID=7, <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> +0000 UTC, <_>`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=0 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=1 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=2 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> 
currentBuckets="unsupported value type"`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=3 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=4 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=5 handledMessageTime="2024-04-03 <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=6 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=offset_committer.go:174 level=info msg="partition offset committer committed offset" topic=cortex-dev-01-aggregations partition=7 <_> <_> +0000 UTC" <_> <_> +0000 UTC" <_> currentBuckets="unsupported value type"`, + `<_> caller=wrapper.go:48 level=info component=distributor msg="sample remote write" eventType=bi <_> <_> <_>`, } patterns := make([]string, 0, len(resp.Series)) @@ -69,5 +113,5 @@ func Test_prunePatterns(t *testing.T) { slices.Sort(patterns) require.Equal(t, expectedPatterns, patterns) - require.Less(t, len(patterns), startingPatterns, `prunePatterns should remove duplicates`) + require.Less(t, len(patterns), startingPatterns, "prunePatterns should remove duplicates") } diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 7bab6f6c5d054..d1d93b7bcfdfe 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -919,7 +919,10 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht "end": []string{fmt.Sprintf("%d", request.End.UnixNano())}, "line_limit": []string{fmt.Sprintf("%d", request.GetLineLimit())}, "field_limit": []string{fmt.Sprintf("%d", request.GetFieldLimit())}, - "step": []string{fmt.Sprintf("%d", request.GetStep())}, + } + + if request.Step != 0 { + params["step"] = []string{fmt.Sprintf("%f", float64(request.Step)/float64(1e3))} } u := &url.URL{ @@ -940,7 +943,10 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht "query": []string{request.GetQuery()}, "start": []string{fmt.Sprintf("%d", request.Start.UnixNano())}, "end": []string{fmt.Sprintf("%d", request.End.UnixNano())}, - "step": []string{fmt.Sprintf("%d", request.GetStep())}, + } + + if request.Step != 0 { + params["step"] = []string{fmt.Sprintf("%f", float64(request.Step)/float64(1e3))} } u := &url.URL{ diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index af047d8e84e1e..f5d00e263d0f6 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -10,7 +10,7 @@ import ( "net/http/httptest" "net/url" "strconv" - strings "strings" + "strings" "testing" "time" @@ -202,6 +202,57 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { Step: 30 * 1e3, // step is expected in ms; default is 0 or no step AggregateBy: "series", }, false}, + {"detected_fields", func() (*http.Request, error) { + return DefaultCodec.EncodeRequest(ctx, &DetectedFieldsRequest{ + logproto.DetectedFieldsRequest{ + Query: 
`{foo="bar"}`, + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms; default is 0 or no step + LineLimit: 100, + FieldLimit: 100, + }, + "/loki/api/v1/detected_fields", + }) + }, &DetectedFieldsRequest{ + logproto.DetectedFieldsRequest{ + Query: `{foo="bar"}`, + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms; default is 0 or no step + LineLimit: 100, + FieldLimit: 100, + }, + "/loki/api/v1/detected_fields", + }, false}, + {"patterns", func() (*http.Request, error) { + return DefaultCodec.EncodeRequest(ctx, &logproto.QueryPatternsRequest{ + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms + }) + }, &logproto.QueryPatternsRequest{ + Start: start, + End: end, + Step: 30 * 1e3, // step is expected in ms; default is 0 or no step + }, false}, + {"detected_labels", func() (*http.Request, error) { + return DefaultCodec.EncodeRequest(ctx, &DetectedLabelsRequest{ + "/loki/api/v1/detected_labels", + logproto.DetectedLabelsRequest{ + Query: `{foo="bar"}`, + Start: start, + End: end, + }, + }) + }, &DetectedLabelsRequest{ + "/loki/api/v1/detected_labels", + logproto.DetectedLabelsRequest{ + Query: `{foo="bar"}`, + Start: start, + End: end, + }, + }, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index b0472c3a347a2..98ee66f3b61ae 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -206,7 +206,7 @@ func NewMiddleware( return nil, nil, err } - limitedTripperware, err := NewLimitedTripperware(cfg, engineOpts, log, limits, schema, metrics, indexStatsTripperware, codec, iqo) + limitedTripperware, err := NewLimitedTripperware(cfg, engineOpts, log, limits, schema, metrics, indexStatsTripperware, codec, iqo, metricsNamespace) if err != nil { return nil, nil, err } @@ -613,7 +613,7 @@ func NewLogFilterTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Lo } // NewLimitedTripperware creates a new frontend tripperware responsible for handling log requests which are label matcher only, no filter expression. -func NewLimitedTripperware(_ Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, metrics *Metrics, indexStatsTripperware base.Middleware, merger base.Merger, iqo util.IngesterQueryOptions) (base.Middleware, error) { +func NewLimitedTripperware(cfg Config, engineOpts logql.EngineOpts, log log.Logger, limits Limits, schema config.SchemaConfig, metrics *Metrics, indexStatsTripperware base.Middleware, merger base.Merger, iqo util.IngesterQueryOptions, metricsNamespace string) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := indexStatsTripperware.Wrap(next) @@ -625,6 +625,12 @@ func NewLimitedTripperware(_ Config, engineOpts logql.EngineOpts, log log.Logger SplitByIntervalMiddleware(schema.Configs, WithMaxParallelism(limits, limitedQuerySplits), merger, newDefaultSplitter(limits, iqo), metrics.SplitByMetrics), NewQuerierSizeLimiterMiddleware(schema.Configs, engineOpts, log, limits, statsHandler), } + if cfg.MaxRetries > 0 { + queryRangeMiddleware = append( + queryRangeMiddleware, base.InstrumentMiddleware("retry", metrics.InstrumentMiddlewareMetrics), + base.NewRetryMiddleware(log, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, metricsNamespace), + ) + } if len(queryRangeMiddleware) > 0 { return NewLimitedRoundTripper(next, limits, schema.Configs, queryRangeMiddleware...) 
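
A convention worth calling out in the codec.go hunks above: the request structs carry `Step` in milliseconds (as the test comments note), but the HTTP query string expects a Prometheus-style seconds value, so the encoder now divides by 1e3, formats with `%f`, and omits the parameter entirely when `Step` is zero so the server can apply its own default. A minimal sketch of that conversion; the `encodeStep` helper name is illustrative, not a function from this PR:

```go
package main

import (
	"fmt"
	"net/url"
)

// encodeStep mirrors the codec change: a zero step is omitted from the
// query string, while a non-zero step in milliseconds is written as a
// seconds value, e.g. 30000ms -> "30.000000".
func encodeStep(params url.Values, stepMs int64) {
	if stepMs == 0 {
		return // no "step" parameter at all
	}
	params["step"] = []string{fmt.Sprintf("%f", float64(stepMs)/float64(1e3))}
}

func main() {
	params := url.Values{}
	encodeStep(params, 30*1e3) // the 30s step used throughout codec_test.go
	fmt.Println(params.Encode()) // step=30.000000
}
```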
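Likewise, the roundtrip.go hunk threads `cfg` and `metricsNamespace` into `NewLimitedTripperware` (which previously discarded its `Config` via the `_` parameter and so never retried) and conditionally appends the retry middleware behind a `cfg.MaxRetries > 0` guard. A reduced sketch of that conditional-chain pattern, with `Handler` and `Middleware` as simplified stand-ins for the `queryrangebase` types rather than the real interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

// Handler and Middleware are simplified stand-ins for the
// queryrangebase types wired up in roundtrip.go.
type Handler func(query string) (string, error)

type Middleware func(Handler) Handler

// withRetry loosely mirrors the role of base.NewRetryMiddleware:
// re-invoke the wrapped handler until it succeeds or the budget is spent.
func withRetry(maxRetries int) Middleware {
	return func(next Handler) Handler {
		return func(query string) (res string, err error) {
			for attempt := 0; attempt <= maxRetries; attempt++ {
				if res, err = next(query); err == nil {
					return res, nil
				}
			}
			return "", fmt.Errorf("retries exhausted: %w", err)
		}
	}
}

func main() {
	chain := []Middleware{ /* split-by-interval, size limiter, ... */ }
	maxRetries := 2
	if maxRetries > 0 { // the same guard the PR adds for the limited path
		chain = append(chain, withRetry(maxRetries))
	}

	// Apply the chain to a terminal handler that fails once, then succeeds.
	calls := 0
	var h Handler = func(string) (string, error) {
		calls++
		if calls < 2 {
			return "", errors.New("transient")
		}
		return "ok", nil
	}
	for i := len(chain) - 1; i >= 0; i-- {
		h = chain[i](h)
	}
	res, err := h(`{foo="bar"}`)
	fmt.Println(res, err, "calls:", calls) // ok <nil> calls: 2
}
```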
diff --git a/pkg/storage/bloom/v1/archive_test.go b/pkg/storage/bloom/v1/archive_test.go index d6131c166f674..63a321bca4f3f 100644 --- a/pkg/storage/bloom/v1/archive_test.go +++ b/pkg/storage/bloom/v1/archive_test.go @@ -17,8 +17,7 @@ func TestArchive(t *testing.T) { dir2 := t.TempDir() numSeries := 100 - numKeysPerSeries := 10000 - data, _ := MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0x0000, 0xffff, 0, 10000) + data, _ := MkBasicSeriesWithBlooms(numSeries, 0x0000, 0xffff, 0, 10000) builder, err := NewBlockBuilder( BlockOptions{ @@ -33,7 +32,7 @@ func TestArchive(t *testing.T) { ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBloom](data) + itr := NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.Nil(t, err) diff --git a/pkg/storage/bloom/v1/block.go b/pkg/storage/bloom/v1/block.go index ba661de79c498..b0b4e5ad9647a 100644 --- a/pkg/storage/bloom/v1/block.go +++ b/pkg/storage/bloom/v1/block.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/pkg/errors" - "github.com/prometheus/common/model" ) type BlockMetadata struct { @@ -104,12 +103,10 @@ func (b *Block) Schema() (Schema, error) { } type BlockQuerier struct { - series *LazySeriesIter + *LazySeriesIter blooms *LazyBloomIter block *Block // ref to underlying block - - cur *SeriesWithBloom } // NewBlockQuerier returns a new BlockQuerier for the given block. @@ -117,11 +114,13 @@ type BlockQuerier struct { // will be returned to the pool for efficiency. This can only safely be used // when the underlying bloom bytes don't escape the decoder, i.e. // when loading blooms for querying (bloom-gw) but not for writing (bloom-compactor). -func NewBlockQuerier(b *Block, noCapture bool, maxPageSize int) *BlockQuerier { +// When usePool is true, the bloom MUST NOT be captured by the caller. Rather, +// it should be discarded before another call to Next(). 
+func NewBlockQuerier(b *Block, usePool bool, maxPageSize int) *BlockQuerier { return &BlockQuerier{ - block: b, - series: NewLazySeriesIter(b), - blooms: NewLazyBloomIter(b, noCapture, maxPageSize), + block: b, + LazySeriesIter: NewLazySeriesIter(b), + blooms: NewLazyBloomIter(b, usePool, maxPageSize), } } @@ -134,42 +133,77 @@ func (bq *BlockQuerier) Schema() (Schema, error) { } func (bq *BlockQuerier) Reset() error { - return bq.series.Seek(0) + return bq.LazySeriesIter.Seek(0) +} + +func (bq *BlockQuerier) Err() error { + if err := bq.LazySeriesIter.Err(); err != nil { + return err + } + + return bq.blooms.Err() +} + +type BlockQuerierIter struct { + *BlockQuerier +} + +// Iter returns a new BlockQuerierIter, which changes the iteration type to SeriesWithBlooms, +// automatically loading the blooms for each series rather than requiring the caller to +// turn the offset to a `Bloom` via `LoadOffset` +func (bq *BlockQuerier) Iter() *BlockQuerierIter { + return &BlockQuerierIter{BlockQuerier: bq} +} + +func (b *BlockQuerierIter) Next() bool { + return b.LazySeriesIter.Next() +} + +func (b *BlockQuerierIter) At() *SeriesWithBlooms { + s := b.LazySeriesIter.At() + res := &SeriesWithBlooms{ + Series: &s.Series, + Blooms: newOffsetsIter(b.blooms, s.Offsets), + } + return res +} + +type offsetsIter struct { + blooms *LazyBloomIter + offsets []BloomOffset + cur int } -func (bq *BlockQuerier) Seek(fp model.Fingerprint) error { - return bq.series.Seek(fp) +func newOffsetsIter(blooms *LazyBloomIter, offsets []BloomOffset) *offsetsIter { + return &offsetsIter{ + blooms: blooms, + offsets: offsets, + } } -func (bq *BlockQuerier) Next() bool { - for bq.series.Next() { - series := bq.series.At() - if skip := bq.blooms.LoadOffset(series.Offset); skip { - // can't seek to the desired bloom, likely because the page was too large to load - // so we skip this series and move on to the next +func (it *offsetsIter) Next() bool { + for it.cur < len(it.offsets) { + + if skip := it.blooms.LoadOffset(it.offsets[it.cur]); skip { + it.cur++ continue } - if !bq.blooms.Next() { - return false - } - bloom := bq.blooms.At() - bq.cur = &SeriesWithBloom{ - Series: &series.Series, - Bloom: bloom, - } - return true + + it.cur++ + return it.blooms.Next() + } return false } -func (bq *BlockQuerier) At() *SeriesWithBloom { - return bq.cur +func (it *offsetsIter) At() *Bloom { + return it.blooms.At() } -func (bq *BlockQuerier) Err() error { - if err := bq.series.Err(); err != nil { - return err - } +func (it *offsetsIter) Err() error { + return it.blooms.Err() +} - return bq.blooms.Err() +func (it *offsetsIter) Remaining() int { + return len(it.offsets) - it.cur } diff --git a/pkg/storage/bloom/v1/block_writer.go b/pkg/storage/bloom/v1/block_writer.go index 1bdc38f32fca9..70ed868235a75 100644 --- a/pkg/storage/bloom/v1/block_writer.go +++ b/pkg/storage/bloom/v1/block_writer.go @@ -21,6 +21,7 @@ type BlockWriter interface { Index() (io.WriteCloser, error) Blooms() (io.WriteCloser, error) Size() (int, error) // byte size of accumulated index & blooms + Full(maxSize uint64) (full bool, size int, err error) } // in memory impl @@ -46,6 +47,19 @@ func (b MemoryBlockWriter) Size() (int, error) { return b.index.Len() + b.blooms.Len(), nil } +func (b MemoryBlockWriter) Full(maxSize uint64) (full bool, size int, err error) { + size, err = b.Size() + if err != nil { + return false, 0, errors.Wrap(err, "getting block size") + } + + if maxSize == 0 { + return false, size, nil + } + + return uint64(size) >= maxSize, size, nil +} + //
 // Directory based impl
 type DirectoryBlockWriter struct {
 	dir string
@@ -112,3 +126,16 @@ func (b *DirectoryBlockWriter) Size() (int, error) {
 	}
 	return size, nil
 }
+
+func (b *DirectoryBlockWriter) Full(maxSize uint64) (full bool, size int, err error) {
+	size, err = b.Size()
+	if err != nil {
+		return false, 0, errors.Wrap(err, "getting block size")
+	}
+
+	if maxSize == 0 {
+		return false, size, nil
+	}
+
+	return uint64(size) >= maxSize, size, nil
+}
diff --git a/pkg/storage/bloom/v1/bloom.go b/pkg/storage/bloom/v1/bloom.go
index aa51762d4e4ec..b9f4b0cdc6a9a 100644
--- a/pkg/storage/bloom/v1/bloom.go
+++ b/pkg/storage/bloom/v1/bloom.go
@@ -225,12 +225,6 @@ type BloomBlock struct {
 	pageHeaders []BloomPageHeader
 }
 
-func NewBloomBlock(encoding chunkenc.Encoding) BloomBlock {
-	return BloomBlock{
-		schema: Schema{version: DefaultSchemaVersion, encoding: encoding},
-	}
-}
-
 func (b *BloomBlock) DecodeHeaders(r io.ReadSeeker) (uint32, error) {
 	if err := b.schema.DecodeFrom(r); err != nil {
 		return 0, errors.Wrap(err, "decoding schema")
diff --git a/pkg/storage/bloom/v1/bloom_builder.go b/pkg/storage/bloom/v1/bloom_builder.go
new file mode 100644
index 0000000000000..ea54ba248f7c4
--- /dev/null
+++ b/pkg/storage/bloom/v1/bloom_builder.go
@@ -0,0 +1,116 @@
+package v1
+
+import (
+	"io"
+
+	"github.com/pkg/errors"
+
+	"github.com/grafana/loki/v3/pkg/util/encoding"
+)
+
+type BloomBlockBuilder struct {
+	opts   BlockOptions
+	writer io.WriteCloser
+
+	offset        int // track the offset of the file
+	writtenSchema bool
+	pages         []BloomPageHeader
+	page          PageWriter
+	scratch       *encoding.Encbuf
+}
+
+func NewBloomBlockBuilder(opts BlockOptions, writer io.WriteCloser) *BloomBlockBuilder {
+	return &BloomBlockBuilder{
+		opts:    opts,
+		writer:  writer,
+		page:    NewPageWriter(int(opts.BloomPageSize)),
+		scratch: &encoding.Encbuf{},
+	}
+}
+
+func (b *BloomBlockBuilder) WriteSchema() error {
+	b.scratch.Reset()
+	b.opts.Schema.Encode(b.scratch)
+	if _, err := b.writer.Write(b.scratch.Get()); err != nil {
+		return errors.Wrap(err, "writing schema")
+	}
+	b.writtenSchema = true
+	b.offset += b.scratch.Len()
+	return nil
+}
+
+func (b *BloomBlockBuilder) Append(bloom *Bloom) (BloomOffset, error) {
+	if !b.writtenSchema {
+		if err := b.WriteSchema(); err != nil {
+			return BloomOffset{}, errors.Wrap(err, "writing schema")
+		}
+	}
+
+	b.scratch.Reset()
+	if err := bloom.Encode(b.scratch); err != nil {
+		return BloomOffset{}, errors.Wrap(err, "encoding bloom")
+	}
+
+	if !b.page.SpaceFor(b.scratch.Len()) {
+		if err := b.flushPage(); err != nil {
+			return BloomOffset{}, errors.Wrap(err, "flushing bloom page")
+		}
+	}
+
+	return BloomOffset{
+		Page:       len(b.pages),
+		ByteOffset: b.page.Add(b.scratch.Get()),
+	}, nil
+}
+
+func (b *BloomBlockBuilder) Close() (uint32, error) {
+	if b.page.Count() > 0 {
+		if err := b.flushPage(); err != nil {
+			return 0, errors.Wrap(err, "flushing final bloom page")
+		}
+	}
+
+	b.scratch.Reset()
+	b.scratch.PutUvarint(len(b.pages))
+	for _, h := range b.pages {
+		h.Encode(b.scratch)
+	}
+	// put offset to beginning of header section
+	// cannot be varint encoded because its offset will be calculated as
+	// the 8 bytes prior to the checksum
+	b.scratch.PutBE64(uint64(b.offset))
+
+	crc32Hash := Crc32HashPool.Get()
+	defer Crc32HashPool.Put(crc32Hash)
+	// wrap with final checksum
+	b.scratch.PutHash(crc32Hash)
+	_, err := b.writer.Write(b.scratch.Get())
+	if err != nil {
+		return 0, errors.Wrap(err, "writing bloom page headers")
+	}
+	return crc32Hash.Sum32(), errors.Wrap(b.writer.Close(), "closing 
bloom writer") +} + +func (b *BloomBlockBuilder) flushPage() error { + crc32Hash := Crc32HashPool.Get() + defer Crc32HashPool.Put(crc32Hash) + + decompressedLen, compressedLen, err := b.page.writePage( + b.writer, + b.opts.Schema.CompressorPool(), + crc32Hash, + ) + if err != nil { + return errors.Wrap(err, "writing bloom page") + } + header := BloomPageHeader{ + N: b.page.Count(), + Offset: b.offset, + Len: compressedLen, + DecompressedLen: decompressedLen, + } + b.pages = append(b.pages, header) + b.offset += compressedLen + b.page.Reset() + return nil +} diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go index 606f4a84dc3c6..7d2ba41b7f49c 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer.go @@ -1,17 +1,15 @@ package v1 import ( - "fmt" "math" - "time" - "github.com/c2h5oh/datasize" "github.com/go-kit/log/level" - "github.com/pkg/errors" - - "github.com/grafana/dskit/multierror" "github.com/grafana/loki/v3/pkg/iter" + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" + + "github.com/grafana/loki/pkg/push" "github.com/grafana/loki/v3/pkg/util/encoding" util_log "github.com/grafana/loki/v3/pkg/util/log" @@ -26,7 +24,7 @@ Bloom filters are utilized for faster lookups of log lines. type BloomTokenizer struct { metrics *Metrics - maxBloomSize int + maxBloomSize int // size in bytes lineTokenizer *NGramTokenizer cache map[string]interface{} } @@ -59,10 +57,6 @@ func (bt *BloomTokenizer) SkipFactor() uint64 { return uint64(bt.lineTokenizer.SkipFactor()) } -func clearCache(cache map[string]interface{}) { - clear(cache) -} - // prefixedToken returns a byte slice with sufficient capacity for a chunk-ref prefixed token // of specific ngram length, along with the length of the prefix. // It ensures enough capacity for the prefix and the token so additional tokens can be created @@ -91,153 +85,198 @@ type ChunkRefWithIter struct { Itr iter.EntryIterator } -// Populate adds the tokens from the given chunks to the given seriesWithBloom. -// The `skip` return value indicates whether this series should be discarded and is used to short-circuit -// bloom generation for series that are too large. We will undoubtedly improve this in the future. -func (bt *BloomTokenizer) Populate(swb *SeriesWithBloom, chks Iterator[ChunkRefWithIter]) (bytesAdded int, skip bool, err error) { - startTime := time.Now().UnixMilli() +// n ≈ −m ln(1 − p). +func estimatedCount(m uint, p float64) uint { + return uint(-float64(m) * math.Log(1-p)) +} + +func (bt *BloomTokenizer) newBloom() *Bloom { + return &Bloom{ + // TODO parameterise SBF options. 
fp_rate
+		ScalableBloomFilter: *filter.NewScalableBloomFilter(1024, 0.01, 0.8),
+	}
+}
+
+func (bt *BloomTokenizer) Populate(
+	blooms SizedIterator[*Bloom],
+	chks Iterator[ChunkRefWithIter],
+	ch chan *BloomCreation,
+) {
+	var next bool
+
+	// All but the last bloom are considered full -- send back unaltered
+	for next = blooms.Next(); next && blooms.Remaining() > 0; next = blooms.Next() {
+		ch <- &BloomCreation{
+			Bloom:            blooms.At(),
+			SourceBytesAdded: 0,
+		}
+	}
+
+	var bloom *Bloom
+	if next {
+		// The last bloom has been made available via the `Next()` call above
+		bloom = blooms.At()
+	} else {
+		bloom = bt.newBloom()
+	}
+
+	var bytesAdded int
+
+	for chks.Next() {
+		chk := chks.At()
+		itr := newPeekingEntryIterAdapter(chk.Itr)
+
+		for {
+			full, newBytes := bt.addChunkToBloom(
+				bloom,
+				chk.Ref,
+				itr,
+			)
+			bytesAdded += newBytes
+
+			// If a bloom is full, the chunk wasn't completely added
+			// so we'll submit this bloom, start a new one, and continue indexing
+			if full {
+				bt.sendBloom(ch, bloom, bytesAdded)
+
+				// start a new bloom + reset bytesAdded counter
+				bytesAdded = 0
+				bloom = bt.newBloom()
+
+				// cache _MUST_ be cleared when a new bloom is created to ensure that all tokens from
+				// each line are indexed into at least one bloom
+				clear(bt.cache)
+				continue
+			}
+
+			break
+		}
+
+	}
 
-	clearCache(bt.cache)
+	// Send the last bloom
+	bt.sendBloom(ch, bloom, bytesAdded)
+	close(ch)
+}
+
+func (bt *BloomTokenizer) sendBloom(
+	ch chan<- *BloomCreation,
+	bloom *Bloom,
+	bytesAdded int,
+) {
+	fillRatio := bloom.ScalableBloomFilter.FillRatio()
+	bt.metrics.hammingWeightRatio.Observe(fillRatio)
+	bt.metrics.estimatedCount.Observe(
+		float64(estimatedCount(bloom.ScalableBloomFilter.Capacity(), fillRatio)),
+	)
+	bt.metrics.bloomSize.Observe(float64(bloom.ScalableBloomFilter.Capacity() / eightBits))
+	bt.metrics.bloomsTotal.Inc()
+	ch <- &BloomCreation{
+		Bloom:            bloom,
+		SourceBytesAdded: bytesAdded,
+	}
+}
+// addChunkToBloom adds the tokens from the given chunk to the given bloom.
+// It continues until the chunk is exhausted or the bloom is full.
+// NB(owen-d): We ensure the invariant that each line is indexed entirely into at least one bloom.
+// This includes both raw ngrams and chunk-prefixed ngrams and is why we use a peeking iterator --
+// so we can advance the iterator only after we're sure the bloom has accepted the line.
+// This is because the _line_ is the atom in Loki's data model and a query must either match (or not) an individual line.
+// Therefore, we index entire lines into a bloom to ensure lookups are accurate.
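+// It reports whether the bloom filled up mid-chunk -- in which case Populate
+// starts a fresh bloom and retries the remainder -- along with the number of
+// source bytes processed from the chunk.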
+func (bt *BloomTokenizer) addChunkToBloom(bloom *Bloom, ref ChunkRef, entryIter PeekingIterator[push.Entry]) (full bool, bytesAdded int) { var ( - tokenBuf []byte - prefixLn int - // TODO(owen-d): slightly more efficient to expose the - // UncompressedSize() method on the chunk interface and use that - sourceBytes int // source bytes processed + tokenBuf, prefixLn = prefixedToken(bt.lineTokenizer.N(), ref, nil) + tokens int + successfulInserts int + cachedInserts int + collisionInserts int + chunkBytes int + linesAdded int ) - // Iterate over chunks - for chks.Next() && chks.Err() == nil { - - var ( - tokens int - successfulInserts int - cachedInserts int - collisionInserts int - chunkSuccessfulInserts int - chunkCachedInserts int - chunkCollisionInserts int - chunkBytes int - chk = chks.At() - itr = chk.Itr - ) - tokenBuf, prefixLn = prefixedToken(bt.lineTokenizer.N(), chk.Ref, tokenBuf) - - // Iterate over lines in the chunk - entries: - for itr.Next() && itr.Error() == nil { - // TODO(owen-d): rather than iterate over the line twice, once for prefixed tokenizer & once for - // raw tokenizer, we could iterate once and just return (prefix, token) pairs from the tokenizer. - // Double points for them being different-ln references to the same data. - line := itr.Entry().Line - chunkBytes += len(line) - - tokenItrs := []Iterator[[]byte]{ - // two iterators, one for the raw tokens and one for the chunk prefixed tokens. - // Warning: the underlying line tokenizer (used in both iterators) uses the same buffer for tokens. - // They are NOT SAFE for concurrent use. - NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(line)), - bt.lineTokenizer.Tokens(line), - } - for _, itr := range tokenItrs { - for itr.Next() { - tok := itr.At() - tokens++ - // TODO(owen-d): [n]byte this - str := string(tok) - _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters - if found { - cachedInserts++ - continue - } + // We use a peeking iterator to avoid advancing the iterator until we're sure the bloom has accepted the line. +outer: + for entry, ok := entryIter.Peek(); ok; entry, ok = entryIter.Peek() { + line := entry.Line + chunkBytes += len(line) + + tokenItrs := []Iterator[[]byte]{ + // two iterators, one for the raw tokens and one for the chunk prefixed tokens. + // Warning: the underlying line tokenizer (used in both iterators) uses the same buffer for tokens. + // They are NOT SAFE for concurrent use. + NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(line)), + bt.lineTokenizer.Tokens(line), + } - bt.cache[str] = nil - collision, sz := swb.Bloom.ScalableBloomFilter.HeavyAdd(tok) - if collision { - collisionInserts++ - } else { - successfulInserts++ - } + for _, itr := range tokenItrs { + for itr.Next() { + tok := itr.At() + tokens++ + // TODO[owen-d]: [n]byte this + str := string(tok) + _, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters + if found { + cachedInserts++ + continue + } - if bt.maxBloomSize > 0 && sz > bt.maxBloomSize { - skip = true - break entries - } + // maxBloomSize is in bytes, but blooms operate at the bit level; adjust + var collision bool + collision, full = bloom.ScalableBloomFilter.TestAndAddWithMaxSize(tok, bt.maxBloomSize*eightBits) - if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. 
This speaks to the similarity in log lines near each other
-							clearCache(bt.cache)
+					if full {
+						// edge case: one line maxed out the bloom size -- retrying is futile
+						// (and will loop endlessly), so we'll just skip indexing it
+						if linesAdded == 0 {
+							_ = entryIter.Next()
 						}
-					}
-				}
-			}
+						break outer
+					}
 
-		// add the recorded chunkbytes to the sourcebytes counter in case we return early via error
-		sourceBytes += chunkBytes
+					if collision {
+						collisionInserts++
+					} else {
+						successfulInserts++
+					}
 
-		var es multierror.MultiError
-		if err := itr.Close(); err != nil {
-			es.Add(errors.Wrapf(err, "error closing chunk: %#v", chk.Ref))
-		}
-		if err := itr.Error(); err != nil {
-			es.Add(errors.Wrapf(err, "error iterating chunk: %#v", chk.Ref))
-		}
-		if combined := es.Err(); combined != nil {
-			return sourceBytes, skip, combined
-		}
-		swb.Series.Chunks = append(swb.Series.Chunks, chk.Ref)
-
-		// update metrics after each chunk added for more consistent reporting
-		bt.metrics.tokensTotal.Add(float64(tokens))
-		bt.metrics.insertsTotal.WithLabelValues(tokenTypeRaw, collisionTypeFalse).Add(float64(successfulInserts))
-		bt.metrics.insertsTotal.WithLabelValues(tokenTypeRaw, collisionTypeCache).Add(float64(cachedInserts))
-		bt.metrics.insertsTotal.WithLabelValues(tokenTypeRaw, collisionTypeTrue).Add(float64(collisionInserts))
-		bt.metrics.insertsTotal.WithLabelValues(tokenTypeChunkPrefixed, collisionTypeFalse).Add(float64(chunkSuccessfulInserts))
-		bt.metrics.insertsTotal.WithLabelValues(tokenTypeChunkPrefixed, collisionTypeCache).Add(float64(chunkCachedInserts))
-		bt.metrics.insertsTotal.WithLabelValues(tokenTypeChunkPrefixed, collisionTypeTrue).Add(float64(chunkCollisionInserts))
-		bt.metrics.sourceBytesAdded.Add(float64(chunkBytes))
-
-		// Exit early if the series is too large
-		if skip {
-			break
+					// only register the key in the cache if it was successfully added to the bloom,
+					// as caching a failed insert would prevent us from trying subsequent copies
+					bt.cache[str] = nil
+					if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. 
This speaks to the similarity in log lines near each other + clear(bt.cache) + } + } } - } - if err := chks.Err(); err != nil { - level.Error(util_log.Logger).Log("msg", "error downloading chunks batch", "err", err) - return sourceBytes, skip, fmt.Errorf("error downloading chunks batch: %w", err) + // Only advance the iterator once we're sure the bloom has accepted the line + linesAdded++ + _ = entryIter.Next() } - level.Debug(util_log.Logger).Log( - "msg", "bloom filter populated", - "chunks", len(swb.Series.Chunks), - "fp", swb.Series.Fingerprint, - "sourceBytes", datasize.ByteSize(sourceBytes).HumanReadable(), - "bloomSize", datasize.ByteSize(swb.Bloom.Capacity()/8).HumanReadable(), - "skipped", skip, - ) + // update metrics after each chunk added for more consistent reporting + bt.metrics.tokensTotal.Add(float64(tokens)) + bt.metrics.insertsTotal.WithLabelValues(collisionTypeFalse).Add(float64(successfulInserts)) + bt.metrics.insertsTotal.WithLabelValues(collisionTypeCache).Add(float64(cachedInserts)) + bt.metrics.insertsTotal.WithLabelValues(collisionTypeTrue).Add(float64(collisionInserts)) + bt.metrics.sourceBytesAdded.Add(float64(chunkBytes)) - endTime := time.Now().UnixMilli() + return full, chunkBytes +} - fillRatio := swb.Bloom.ScalableBloomFilter.FillRatio() - bt.metrics.hammingWeightRatio.Observe(fillRatio) - bt.metrics.estimatedCount.Observe( - float64(estimatedCount(swb.Bloom.ScalableBloomFilter.Capacity(), fillRatio)), - ) - bt.metrics.bloomSize.Observe(float64(swb.Bloom.ScalableBloomFilter.Capacity() / eightBits)) +type entryIterAdapter struct { + iter.EntryIterator +} - ty := bloomCreationTypeIndexed - if skip { - ty = bloomCreationTypeSkipped - } - bt.metrics.sbfCreationTime.WithLabelValues(ty).Add(float64(endTime - startTime)) - bt.metrics.bloomsTotal.WithLabelValues(ty).Inc() +func (a entryIterAdapter) At() logproto.Entry { + return a.EntryIterator.Entry() +} - return sourceBytes, skip, nil +func (a entryIterAdapter) Err() error { + return a.EntryIterator.Error() } -// n ≈ −m ln(1 − p). 
-func estimatedCount(m uint, p float64) uint { - return uint(-float64(m) * math.Log(1-p)) +func newPeekingEntryIterAdapter(itr iter.EntryIterator) *PeekIter[logproto.Entry] { + return NewPeekingIter[logproto.Entry](entryIterAdapter{itr}) } diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go index dec23f91e80bb..7685faaa92427 100644 --- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go +++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go @@ -4,14 +4,16 @@ import ( "context" "fmt" "math" + "math/rand" "testing" "time" - "github.com/prometheus/prometheus/model/labels" + "github.com/grafana/dskit/multierror" "github.com/grafana/loki/pkg/push" "github.com/grafana/loki/v3/pkg/chunkenc" + "github.com/grafana/loki/v3/pkg/iter" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/log" @@ -97,8 +99,6 @@ func TestTokenizerPopulate(t *testing.T) { bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics) sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8) - var lbsList []labels.Labels - lbsList = append(lbsList, labels.FromStrings("foo", "bar")) memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000) _ = memChunk.Append(&push.Entry{ @@ -117,22 +117,140 @@ func TestTokenizerPopulate(t *testing.T) { bloom := Bloom{ ScalableBloomFilter: *sbf, } - series := Series{ - Fingerprint: model.Fingerprint(lbsList[0].Hash()), - } - swb := SeriesWithBloom{ - Bloom: &bloom, - Series: &series, + + blooms, err := populateAndConsumeBloom( + bt, + NewSliceIter([]*Bloom{&bloom}), + NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, + Itr: itr}}), + ) + require.NoError(t, err) + require.Equal(t, 1, len(blooms)) + + tokenizer := NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip) + toks := tokenizer.Tokens(testLine) + for toks.Next() { + token := toks.At() + require.True(t, blooms[0].Test(token)) } +} + +func TestBloomTokenizerPopulateWithoutPreexistingBloom(t *testing.T) { + var testLine = "this is a log line" + bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics) + + memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000) + _ = memChunk.Append(&push.Entry{ + Timestamp: time.Unix(0, 1), + Line: testLine, + }) + itr, err := memChunk.Iterator( + context.Background(), + time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps? 
+ time.Unix(0, math.MaxInt64), + logproto.FORWARD, + log.NewNoopPipeline().ForStream(nil), + ) + require.Nil(t, err) - _, _, err = bt.Populate(&swb, NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, Itr: itr}})) + blooms, err := populateAndConsumeBloom( + bt, + NewEmptyIter[*Bloom](), + NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, + Itr: itr}}), + ) require.NoError(t, err) + require.Equal(t, 1, len(blooms)) + tokenizer := NewNGramTokenizer(DefaultNGramLength, DefaultNGramSkip) toks := tokenizer.Tokens(testLine) for toks.Next() { token := toks.At() - require.True(t, swb.Bloom.Test(token)) + require.True(t, blooms[0].Test(token)) + } + +} + +func chunkRefItrFromLines(lines ...string) (iter.EntryIterator, error) { + memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000) + for i, line := range lines { + if err := memChunk.Append(&push.Entry{ + Timestamp: time.Unix(0, int64(i)), + Line: line, + }); err != nil { + return nil, err + } + } + + itr, err := memChunk.Iterator( + context.Background(), + time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps? + time.Unix(0, math.MaxInt64), + logproto.FORWARD, + log.NewNoopPipeline().ForStream(nil), + ) + return itr, err +} + +func randomStr(ln int) string { + rng := rand.New(rand.NewSource(0)) + charset := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_!@#$%^&*() ") + + res := make([]rune, ln) + for i := 0; i < ln; i++ { + res[i] = charset[rng.Intn(len(charset))] + } + return string(res) +} + +func TestTokenizerPopulateWontExceedMaxSize(t *testing.T) { + maxSize := 2048 + bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, maxSize, NewMetrics(nil)) + ch := make(chan *BloomCreation) + line := randomStr(10e3) + itr, err := chunkRefItrFromLines(line) + require.NoError(t, err) + go bt.Populate( + NewSliceIter([]*Bloom{ + { + *filter.NewScalableBloomFilter(1024, 0.01, 0.8), + }, + }), + NewSliceIter([]ChunkRefWithIter{ + { + Ref: ChunkRef{}, + Itr: itr, + }, + }), + ch, + ) + + var ct int + for created := range ch { + ct++ + capacity := created.Bloom.ScalableBloomFilter.Capacity() / 8 + require.Less(t, int(capacity), maxSize) } + // ensure we created two bloom filters from this dataset + require.Equal(t, 2, ct) +} + +func populateAndConsumeBloom( + bt *BloomTokenizer, + blooms SizedIterator[*Bloom], + chks Iterator[ChunkRefWithIter], +) (res []*Bloom, err error) { + var e multierror.MultiError + ch := make(chan *BloomCreation) + go bt.Populate(blooms, chks, ch) + for x := range ch { + if x.Err != nil { + e = append(e, x.Err) + } else { + res = append(res, x.Bloom) + } + } + return res, e.Err() } func BenchmarkPopulateSeriesWithBloom(b *testing.B) { @@ -141,8 +259,6 @@ func BenchmarkPopulateSeriesWithBloom(b *testing.B) { bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics) sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8) - var lbsList []labels.Labels - lbsList = append(lbsList, labels.FromStrings("foo", "bar")) memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000) _ = memChunk.Append(&push.Entry{ @@ -161,15 +277,13 @@ func BenchmarkPopulateSeriesWithBloom(b *testing.B) { bloom := Bloom{ ScalableBloomFilter: *sbf, } - series := Series{ - Fingerprint: model.Fingerprint(lbsList[0].Hash()), - } - swb := SeriesWithBloom{ - Bloom: &bloom, - Series: &series, - } - _, _, err = bt.Populate(&swb, 
NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, Itr: itr}})) + _, err = populateAndConsumeBloom( + bt, + NewSliceIter([]*Bloom{&bloom}), + NewSliceIter([]ChunkRefWithIter{{Ref: ChunkRef{}, + Itr: itr}}), + ) require.NoError(b, err) } } @@ -181,7 +295,7 @@ func BenchmarkMapClear(b *testing.B) { bt.cache[fmt.Sprint(k)] = k } - clearCache(bt.cache) + clear(bt.cache) } } diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go index 323da86b67c38..4ccd011fd1ec3 100644 --- a/pkg/storage/bloom/v1/builder.go +++ b/pkg/storage/bloom/v1/builder.go @@ -2,15 +2,12 @@ package v1 import ( "bytes" - "fmt" "hash" "io" "github.com/pkg/errors" - "github.com/prometheus/common/model" "github.com/grafana/loki/v3/pkg/chunkenc" - "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" "github.com/grafana/loki/v3/pkg/util/encoding" ) @@ -68,17 +65,9 @@ func (b BlockOptions) Encode(enc *encoding.Encbuf) { enc.PutBE64(b.BlockSize) } -type BlockBuilder struct { - opts BlockOptions - - writer BlockWriter - index *IndexBuilder - blooms *BloomBlockBuilder -} - func NewBlockOptions(enc chunkenc.Encoding, nGramLength, nGramSkip, maxBlockSizeBytes, maxBloomSizeBytes uint64) BlockOptions { opts := NewBlockOptionsFromSchema(Schema{ - version: byte(1), + version: DefaultSchemaVersion, encoding: enc, nGramLength: nGramLength, nGramSkip: nGramSkip, @@ -97,202 +86,6 @@ func NewBlockOptionsFromSchema(s Schema) BlockOptions { } } -func NewBlockBuilder(opts BlockOptions, writer BlockWriter) (*BlockBuilder, error) { - index, err := writer.Index() - if err != nil { - return nil, errors.Wrap(err, "initializing index writer") - } - blooms, err := writer.Blooms() - if err != nil { - return nil, errors.Wrap(err, "initializing blooms writer") - } - - return &BlockBuilder{ - opts: opts, - writer: writer, - index: NewIndexBuilder(opts, index), - blooms: NewBloomBlockBuilder(opts, blooms), - }, nil -} - -type SeriesWithBloom struct { - Series *Series - Bloom *Bloom -} - -func (b *BlockBuilder) BuildFrom(itr Iterator[SeriesWithBloom]) (uint32, error) { - for itr.Next() { - blockFull, err := b.AddSeries(itr.At()) - if err != nil { - return 0, err - } - if blockFull { - break - } - } - - if err := itr.Err(); err != nil { - return 0, errors.Wrap(err, "iterating series with blooms") - } - - return b.Close() -} - -func (b *BlockBuilder) Close() (uint32, error) { - bloomChecksum, err := b.blooms.Close() - if err != nil { - return 0, errors.Wrap(err, "closing bloom file") - } - indexCheckSum, err := b.index.Close() - if err != nil { - return 0, errors.Wrap(err, "closing series file") - } - return combineChecksums(indexCheckSum, bloomChecksum), nil -} - -// AddSeries adds a series to the block. It returns true after adding the series, the block is full. 
-func (b *BlockBuilder) AddSeries(series SeriesWithBloom) (bool, error) { - offset, err := b.blooms.Append(series) - if err != nil { - return false, errors.Wrapf(err, "writing bloom for series %v", series.Series.Fingerprint) - } - - if err := b.index.Append(SeriesWithOffset{ - Offset: offset, - Series: *series.Series, - }); err != nil { - return false, errors.Wrapf(err, "writing index for series %v", series.Series.Fingerprint) - } - - full, _, err := b.IsBlockFull() - if err != nil { - return false, errors.Wrap(err, "checking if block is full") - } - - return full, nil -} - -func (b *BlockBuilder) IsBlockFull() (full bool, size int, err error) { - size, err = b.writer.Size() - if err != nil { - return false, 0, errors.Wrap(err, "getting block size") - } - - // if the block size is 0, the max size is unlimited - if b.opts.BlockSize == 0 { - return false, size, nil - } - - return uint64(size) >= b.opts.BlockSize, size, nil -} - -type BloomBlockBuilder struct { - opts BlockOptions - writer io.WriteCloser - - offset int // track the offset of the file - writtenSchema bool - pages []BloomPageHeader - page PageWriter - scratch *encoding.Encbuf -} - -func NewBloomBlockBuilder(opts BlockOptions, writer io.WriteCloser) *BloomBlockBuilder { - return &BloomBlockBuilder{ - opts: opts, - writer: writer, - page: NewPageWriter(int(opts.BloomPageSize)), - scratch: &encoding.Encbuf{}, - } -} - -func (b *BloomBlockBuilder) WriteSchema() error { - b.scratch.Reset() - b.opts.Schema.Encode(b.scratch) - if _, err := b.writer.Write(b.scratch.Get()); err != nil { - return errors.Wrap(err, "writing schema") - } - b.writtenSchema = true - b.offset += b.scratch.Len() - return nil -} - -func (b *BloomBlockBuilder) Append(series SeriesWithBloom) (BloomOffset, error) { - if !b.writtenSchema { - if err := b.WriteSchema(); err != nil { - return BloomOffset{}, errors.Wrap(err, "writing schema") - } - } - - b.scratch.Reset() - if err := series.Bloom.Encode(b.scratch); err != nil { - return BloomOffset{}, errors.Wrapf(err, "encoding bloom for %v", series.Series.Fingerprint) - } - - if !b.page.SpaceFor(b.scratch.Len()) { - if err := b.flushPage(); err != nil { - return BloomOffset{}, errors.Wrap(err, "flushing bloom page") - } - } - - return BloomOffset{ - Page: len(b.pages), - ByteOffset: b.page.Add(b.scratch.Get()), - }, nil -} - -func (b *BloomBlockBuilder) Close() (uint32, error) { - if b.page.Count() > 0 { - if err := b.flushPage(); err != nil { - return 0, errors.Wrap(err, "flushing final bloom page") - } - } - - b.scratch.Reset() - b.scratch.PutUvarint(len(b.pages)) - for _, h := range b.pages { - h.Encode(b.scratch) - } - // put offset to beginning of header section - // cannot be varint encoded because it's offset will be calculated as - // the 8 bytes prior to the checksum - b.scratch.PutBE64(uint64(b.offset)) - - crc32Hash := Crc32HashPool.Get() - defer Crc32HashPool.Put(crc32Hash) - // wrap with final checksum - b.scratch.PutHash(crc32Hash) - _, err := b.writer.Write(b.scratch.Get()) - if err != nil { - return 0, errors.Wrap(err, "writing bloom page headers") - } - return crc32Hash.Sum32(), errors.Wrap(b.writer.Close(), "closing bloom writer") -} - -func (b *BloomBlockBuilder) flushPage() error { - crc32Hash := Crc32HashPool.Get() - defer Crc32HashPool.Put(crc32Hash) - - decompressedLen, compressedLen, err := b.page.writePage( - b.writer, - b.opts.Schema.CompressorPool(), - crc32Hash, - ) - if err != nil { - return errors.Wrap(err, "writing bloom page") - } - header := BloomPageHeader{ - N: b.page.Count(), - 
Offset: b.offset, - Len: compressedLen, - DecompressedLen: decompressedLen, - } - b.pages = append(b.pages, header) - b.offset += compressedLen - b.page.Reset() - return nil -} - type PageWriter struct { enc *encoding.Encbuf targetSize int @@ -357,199 +150,60 @@ func (w *PageWriter) writePage(writer io.Writer, pool chunkenc.WriterPool, crc32 return decompressedLen, w.enc.Len(), nil } -type IndexBuilder struct { - opts BlockOptions - writer io.WriteCloser - - offset int // track the offset of the file - writtenSchema bool - pages []SeriesPageHeaderWithOffset - page PageWriter - scratch *encoding.Encbuf - - previousFp model.Fingerprint - previousOffset BloomOffset - fromFp model.Fingerprint - fromTs, throughTs model.Time -} - -func NewIndexBuilder(opts BlockOptions, writer io.WriteCloser) *IndexBuilder { - return &IndexBuilder{ - opts: opts, - writer: writer, - page: NewPageWriter(int(opts.SeriesPageSize)), - scratch: &encoding.Encbuf{}, - } -} - -func (b *IndexBuilder) WriteOpts() error { - b.scratch.Reset() - b.opts.Encode(b.scratch) - if _, err := b.writer.Write(b.scratch.Get()); err != nil { - return errors.Wrap(err, "writing opts+schema") - } - b.writtenSchema = true - b.offset += b.scratch.Len() - return nil -} - -func (b *IndexBuilder) Append(series SeriesWithOffset) error { - if !b.writtenSchema { - if err := b.WriteOpts(); err != nil { - return errors.Wrap(err, "appending series") - } - } - - b.scratch.Reset() - // we don't want to update the previous pointers yet in case - // we need to flush the page first which would - // be passed the incorrect final fp/offset - previousFp, previousOffset := series.Encode(b.scratch, b.previousFp, b.previousOffset) - - if !b.page.SpaceFor(b.scratch.Len()) { - if err := b.flushPage(); err != nil { - return errors.Wrap(err, "flushing series page") - } - - // re-encode now that a new page has been cut and we use delta-encoding - b.scratch.Reset() - previousFp, previousOffset = series.Encode(b.scratch, b.previousFp, b.previousOffset) - } - b.previousFp = previousFp - b.previousOffset = previousOffset - - switch { - case b.page.Count() == 0: - // Special case: this is the first series in a page - if len(series.Chunks) < 1 { - return fmt.Errorf("series with zero chunks for fingerprint %v", series.Fingerprint) - } - b.fromFp = series.Fingerprint - b.fromTs, b.throughTs = chkBounds(series.Chunks) - case b.previousFp > series.Fingerprint: - return fmt.Errorf("out of order series fingerprint for series %v", series.Fingerprint) - default: - from, through := chkBounds(series.Chunks) - if b.fromTs.After(from) { - b.fromTs = from - } - if b.throughTs.Before(through) { - b.throughTs = through - } - } - - _ = b.page.Add(b.scratch.Get()) - b.previousFp = series.Fingerprint - b.previousOffset = series.Offset - return nil -} - -// must be > 1 -func chkBounds(chks []ChunkRef) (from, through model.Time) { - from, through = chks[0].From, chks[0].Through - for _, chk := range chks[1:] { - if chk.From.Before(from) { - from = chk.From - } - - if chk.Through.After(through) { - through = chk.Through - } - } - return -} - -func (b *IndexBuilder) flushPage() error { - crc32Hash := Crc32HashPool.Get() - defer Crc32HashPool.Put(crc32Hash) - - decompressedLen, compressedLen, err := b.page.writePage( - b.writer, - b.opts.Schema.CompressorPool(), - crc32Hash, - ) - if err != nil { - return errors.Wrap(err, "writing series page") - } - - header := SeriesPageHeaderWithOffset{ - Offset: b.offset, - Len: compressedLen, - DecompressedLen: decompressedLen, - SeriesHeader: SeriesHeader{ 
-			NumSeries: b.page.Count(),
-			Bounds:    NewBounds(b.fromFp, b.previousFp),
-			FromTs:    b.fromTs,
-			ThroughTs: b.throughTs,
-		},
-	}
-
-	b.pages = append(b.pages, header)
-	b.offset += compressedLen
-
-	b.fromFp = 0
-	b.fromTs = 0
-	b.throughTs = 0
-	b.previousFp = 0
-	b.previousOffset = BloomOffset{}
-	b.page.Reset()
-
-	return nil
-}
-
-func (b *IndexBuilder) Close() (uint32, error) {
-	if b.page.Count() > 0 {
-		if err := b.flushPage(); err != nil {
-			return 0, errors.Wrap(err, "flushing final series page")
-		}
-	}
-
-	b.scratch.Reset()
-	b.scratch.PutUvarint(len(b.pages))
-	for _, h := range b.pages {
-		h.Encode(b.scratch)
-	}
-
-	// put offset to beginning of header section
-	// cannot be varint encoded because it's offset will be calculated as
-	// the 8 bytes prior to the checksum
-	b.scratch.PutBE64(uint64(b.offset))
-	crc32Hash := Crc32HashPool.Get()
-	defer Crc32HashPool.Put(crc32Hash)
-	// wrap with final checksum
-	b.scratch.PutHash(crc32Hash)
-	_, err := b.writer.Write(b.scratch.Get())
-	if err != nil {
-		return 0, errors.Wrap(err, "writing series page headers")
-	}
-	return crc32Hash.Sum32(), errors.Wrap(b.writer.Close(), "closing series writer")
+type BloomCreation struct {
+	Bloom            *Bloom
+	SourceBytesAdded int
+	Err              error
 }
 
 // Simplistic implementation of a merge builder that builds a single block
 // from a list of blocks and a store of series.
 type MergeBuilder struct {
 	// existing blocks
-	blocks Iterator[*SeriesWithBloom]
+	blocks Iterator[*SeriesWithBlooms]
 	// store
 	store Iterator[*Series]
 	// Add chunks to a bloom
-	populate func(*Series, *Bloom) (sourceBytesAdded int, skipSeries bool, err error)
+	populate func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation)
 	metrics  *Metrics
 }
 
+type BloomPopulatorFunc = func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation)
+
 // NewMergeBuilder is a specific builder which does the following:
 //  1. merges multiple blocks into a single ordered querier,
 //     i) When two blocks have the same series, it will prefer the one with the most chunks already indexed
 //  2. iterates through the store, adding chunks to the relevant blooms via the `populate` argument
 func NewMergeBuilder(
-	blocks Iterator[*SeriesWithBloom],
+	blocks Iterator[*SeriesWithBlooms],
 	store Iterator[*Series],
-	populate func(*Series, *Bloom) (int, bool, error),
+	populate BloomPopulatorFunc,
 	metrics *Metrics,
 ) *MergeBuilder {
+	// combinedSeriesIter handles series with fingerprint collisions:
+	// because blooms don't contain the label-set (only the fingerprint),
+	// in the case of a fingerprint collision we simply treat it as one
+	// series with multiple chunks.
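+	// e.g. two store entries for fingerprint 0xff with chunks [a, b] and [c]
+	// are merged into a single series for 0xff with chunks [a, b, c].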
+ combinedSeriesIter := NewDedupingIter[*Series, *Series]( + // eq + func(s1, s2 *Series) bool { + return s1.Fingerprint == s2.Fingerprint + }, + // from + Identity[*Series], + // merge + func(s1, s2 *Series) *Series { + return &Series{ + Fingerprint: s1.Fingerprint, + Chunks: s1.Chunks.Union(s2.Chunks), + } + }, + NewPeekingIter[*Series](store), + ) + return &MergeBuilder{ blocks: blocks, - store: store, + store: combinedSeriesIter, populate: populate, metrics: metrics, } @@ -557,10 +211,10 @@ func NewMergeBuilder( func (mb *MergeBuilder) processNextSeries( builder *BlockBuilder, - nextInBlocks *SeriesWithBloom, + nextInBlocks *SeriesWithBlooms, blocksFinished bool, ) ( - *SeriesWithBloom, // nextInBlocks pointer update + *SeriesWithBlooms, // nextInBlocks pointer update int, // bytes added bool, // blocksFinished update bool, // done building block @@ -600,53 +254,42 @@ func (mb *MergeBuilder) processNextSeries( nextInBlocks = mb.blocks.At() } - cur := nextInBlocks - chunksToAdd := nextInStore.Chunks - // The next series from the store doesn't exist in the blocks, so we add it - // in its entirety - if nextInBlocks == nil || nextInBlocks.Series.Fingerprint > nextInStore.Fingerprint { - cur = &SeriesWithBloom{ - Series: nextInStore, - Bloom: &Bloom{ - // TODO parameterise SBF options. fp_rate - ScalableBloomFilter: *filter.NewScalableBloomFilter(1024, 0.01, 0.8), - }, - } - } else { + var ( + offsets []BloomOffset + chunksToAdd = nextInStore.Chunks + preExistingBlooms SizedIterator[*Bloom] = NewEmptyIter[*Bloom]() + ) + + if nextInBlocks != nil && nextInBlocks.Series.Fingerprint == nextInStore.Fingerprint { // if the series already exists in the block, we only need to add the new chunks chunksToAdd = nextInStore.Chunks.Unless(nextInBlocks.Series.Chunks) chunksCopied += len(nextInStore.Chunks) - len(chunksToAdd) + preExistingBlooms = nextInBlocks.Blooms } chunksIndexed += len(chunksToAdd) - var ( - err error - skip bool - done bool - sourceBytes int - ) - - if len(chunksToAdd) > 0 { - sourceBytes, skip, err = mb.populate( - &Series{ - Fingerprint: nextInStore.Fingerprint, - Chunks: chunksToAdd, - }, - cur.Bloom, - ) - bytesAdded += sourceBytes + // populate bloom + ch := make(chan *BloomCreation) + go mb.populate(nextInStore, preExistingBlooms, chunksToAdd, ch) + for bloom := range ch { + if bloom.Err != nil { + return nil, bytesAdded, false, false, errors.Wrap(bloom.Err, "populating bloom") + } + offset, err := builder.AddBloom(bloom.Bloom) if err != nil { - return nil, bytesAdded, false, false, errors.Wrapf(err, "populating bloom for series with fingerprint: %v", nextInStore.Fingerprint) + return nil, bytesAdded, false, false, errors.Wrapf( + err, "adding bloom to block for fp (%s)", nextInStore.Fingerprint, + ) } + offsets = append(offsets, offset) + bytesAdded += bloom.SourceBytesAdded } - if !skip { - done, err = builder.AddSeries(*cur) - if err != nil { - return nil, bytesAdded, false, false, errors.Wrap(err, "adding series to block") - } + done, err := builder.AddSeries(*nextInStore, offsets) + if err != nil { + return nil, bytesAdded, false, false, errors.Wrap(err, "committing series") } return nextInBlocks, bytesAdded, blocksFinished, done, nil @@ -654,7 +297,7 @@ func (mb *MergeBuilder) processNextSeries( func (mb *MergeBuilder) Build(builder *BlockBuilder) (checksum uint32, totalBytes int, err error) { var ( - nextInBlocks *SeriesWithBloom + nextInBlocks *SeriesWithBlooms blocksFinished bool // whether any previous blocks have been exhausted while building new block done bool ) 
@@ -675,7 +318,7 @@ func (mb *MergeBuilder) Build(builder *BlockBuilder) (checksum uint32, totalByte } flushedFor := blockFlushReasonFinished - full, sz, _ := builder.IsBlockFull() + full, sz, _ := builder.writer.Full(builder.opts.BlockSize) if full { flushedFor = blockFlushReasonFull } diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go index 56d03cbd7c930..ae1b440af09b8 100644 --- a/pkg/storage/bloom/v1/builder_test.go +++ b/pkg/storage/bloom/v1/builder_test.go @@ -2,7 +2,6 @@ package v1 import ( "bytes" - "errors" "fmt" "sort" "testing" @@ -11,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/grafana/loki/v3/pkg/chunkenc" + "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" "github.com/grafana/loki/v3/pkg/util/encoding" ) @@ -48,8 +48,7 @@ func TestBlockOptionsRoundTrip(t *testing.T) { func TestBlockBuilder_RoundTrip(t *testing.T) { numSeries := 100 - numKeysPerSeries := 10000 - data, keys := MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, 0, 10000) + data, keys := MkBasicSeriesWithLiteralBlooms(numSeries, 0, 0xffff, 0, 10000) for _, enc := range blockEncodings { // references for linking in memory reader+writer @@ -101,7 +100,12 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { builder, err := NewBlockBuilder(blockOpts, tc.writer) require.Nil(t, err) - itr := NewPeekingIter[SeriesWithBloom](NewSliceIter[SeriesWithBloom](data)) + itr := NewPeekingIter[SeriesWithBlooms]( + NewMapIter( + NewSliceIter[SeriesWithLiteralBlooms](data), + func(x SeriesWithLiteralBlooms) SeriesWithBlooms { return x.SeriesWithBlooms() }, + ), + ) _, err = builder.BuildFrom(itr) require.Nil(t, err) @@ -117,7 +121,7 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { } block := NewBlock(tc.reader, NewMetrics(nil)) - querier := NewBlockQuerier(block, false, DefaultMaxPageSize) + querier := NewBlockQuerier(block, false, DefaultMaxPageSize).Iter() err = block.LoadHeaders() require.Nil(t, err) @@ -127,9 +131,18 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { for i := 0; i < len(processedData); i++ { require.Equal(t, true, querier.Next(), "on iteration %d with error %v", i, querier.Err()) got := querier.At() + blooms, err := Collect(got.Blooms) + require.Nil(t, err) require.Equal(t, processedData[i].Series, got.Series) for _, key := range keys[i] { - require.True(t, got.Bloom.Test(key)) + found := false + for _, b := range blooms { + if b.Test(key) { + found = true + break + } + } + require.True(t, found) } require.NoError(t, querier.Err()) } @@ -145,9 +158,18 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { for j := 0; j < len(halfData); j++ { require.Equal(t, true, querier.Next(), "on iteration %d", j) got := querier.At() + blooms, err := Collect(got.Blooms) + require.Nil(t, err) require.Equal(t, halfData[j].Series, got.Series) for _, key := range halfKeys[j] { - require.True(t, got.Bloom.Test(key)) + found := false + for _, b := range blooms { + if b.Test(key) { + found = true + break + } + } + require.True(t, found) } require.NoError(t, querier.Err()) } @@ -160,20 +182,20 @@ func TestBlockBuilder_RoundTrip(t *testing.T) { } } -func dedupedBlocks(blocks []PeekingIterator[*SeriesWithBloom]) Iterator[*SeriesWithBloom] { +func dedupedBlocks(blocks []PeekingIterator[*SeriesWithBlooms]) Iterator[*SeriesWithBlooms] { orderedBlocks := NewHeapIterForSeriesWithBloom(blocks...) 
- return NewDedupingIter[*SeriesWithBloom]( - func(a *SeriesWithBloom, b *SeriesWithBloom) bool { + return NewDedupingIter[*SeriesWithBlooms]( + func(a *SeriesWithBlooms, b *SeriesWithBlooms) bool { return a.Series.Fingerprint == b.Series.Fingerprint }, - Identity[*SeriesWithBloom], - func(a *SeriesWithBloom, b *SeriesWithBloom) *SeriesWithBloom { + Identity[*SeriesWithBlooms], + func(a *SeriesWithBlooms, b *SeriesWithBlooms) *SeriesWithBlooms { if len(a.Series.Chunks) > len(b.Series.Chunks) { return a } return b }, - NewPeekingIter[*SeriesWithBloom](orderedBlocks), + NewPeekingIter[*SeriesWithBlooms](orderedBlocks), ) } @@ -182,9 +204,8 @@ func TestMergeBuilder(t *testing.T) { nBlocks := 10 numSeries := 100 - numKeysPerSeries := 100 - blocks := make([]PeekingIterator[*SeriesWithBloom], 0, nBlocks) - data, _ := MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, 0, 10000) + blocks := make([]PeekingIterator[*SeriesWithBlooms], 0, nBlocks) + data, _ := MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) blockOpts := BlockOptions{ Schema: Schema{ version: DefaultSchemaVersion, @@ -215,22 +236,29 @@ func TestMergeBuilder(t *testing.T) { ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBloom](data[min:max]) + itr := NewSliceIter[SeriesWithBlooms](data[min:max]) _, err = builder.BuildFrom(itr) require.Nil(t, err) - blocks = append(blocks, NewPeekingIter[*SeriesWithBloom](NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), false, DefaultMaxPageSize))) + blocks = append(blocks, NewPeekingIter[*SeriesWithBlooms](NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), false, DefaultMaxPageSize).Iter())) } // We're not testing the ability to extend a bloom in this test - pop := func(_ *Series, _ *Bloom) (int, bool, error) { - return 0, false, errors.New("not implemented") + pop := func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { + for srcBlooms.Next() { + bloom := srcBlooms.At() + ch <- &BloomCreation{ + Bloom: bloom, + SourceBytesAdded: int(bloom.Capacity()) / 8, + } + } + close(ch) } // storage should contain references to all the series we ingested, // regardless of block allocation/overlap. - storeItr := NewMapIter[SeriesWithBloom, *Series]( - NewSliceIter[SeriesWithBloom](data), - func(swb SeriesWithBloom) *Series { + storeItr := NewMapIter[SeriesWithBlooms, *Series]( + NewSliceIter[SeriesWithBlooms](data), + func(swb SeriesWithBlooms) *Series { return swb.Series }, ) @@ -254,21 +282,114 @@ func TestMergeBuilder(t *testing.T) { block := NewBlock(reader, NewMetrics(nil)) querier := NewBlockQuerier(block, false, DefaultMaxPageSize) - EqualIterators[*SeriesWithBloom]( + EqualIterators[*SeriesWithBlooms]( t, - func(a, b *SeriesWithBloom) { + func(a, b *SeriesWithBlooms) { require.Equal(t, a.Series, b.Series, "expected %+v, got %+v", a, b) }, - NewSliceIter[*SeriesWithBloom](PointerSlice(data)), - querier, + NewSliceIter[*SeriesWithBlooms](PointerSlice(data)), + querier.Iter(), ) } +// Fingerprint collisions are treated as the same series. 
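+// Both entries below share fingerprint 0, so the merge builder should emit a
+// single series whose chunk refs are the union of both inputs.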
+func TestMergeBuilderFingerprintCollision(t *testing.T) {
+	t.Parallel()
+
+	// references for linking in memory reader+writer
+	indexBuf := bytes.NewBuffer(nil)
+	bloomsBuf := bytes.NewBuffer(nil)
+	writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
+	reader := NewByteReader(indexBuf, bloomsBuf)
+
+	blockOpts := BlockOptions{
+		Schema: Schema{
+			version:  DefaultSchemaVersion,
+			encoding: chunkenc.EncSnappy,
+		},
+		SeriesPageSize: 100,
+		BloomPageSize:  10 << 10,
+	}
+
+	builder, err := NewBlockBuilder(
+		blockOpts,
+		writer,
+	)
+
+	// two series with the same fingerprint but different chunks
+	chks := []ChunkRef{
+		{
+			From:     0,
+			Through:  0,
+			Checksum: 0,
+		},
+		{
+			From:     1,
+			Through:  1,
+			Checksum: 1,
+		},
+		{
+			From:     2,
+			Through:  2,
+			Checksum: 2,
+		},
+	}
+
+	data := []*Series{
+		{
+			Fingerprint: 0,
+			Chunks: []ChunkRef{
+				chks[0], chks[1],
+			},
+		},
+		{
+			Fingerprint: 0,
+			Chunks: []ChunkRef{
+				chks[2],
+			},
+		},
+	}
+
+	// We're not testing the ability to extend a bloom in this test
+	pop := func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) {
+		ch <- &BloomCreation{
+			Bloom: &Bloom{
+				ScalableBloomFilter: *filter.NewScalableBloomFilter(1024, 0.01, 0.8),
+			},
+		}
+		close(ch)
+	}
+
+	require.Nil(t, err)
+	mergeBuilder := NewMergeBuilder(
+		NewEmptyIter[*SeriesWithBlooms](),
+		NewSliceIter(data),
+		pop,
+		NewMetrics(nil),
+	)
+
+	_, _, err = mergeBuilder.Build(builder)
+	require.Nil(t, err)
+
+	block := NewBlock(reader, NewMetrics(nil))
+	querier := NewBlockQuerier(block, false, DefaultMaxPageSize)
+
+	require.True(t, querier.Next())
+	require.Equal(t,
+		Series{
+			Fingerprint: 0,
+			Chunks:      chks,
+		},
+		querier.At().Series,
+	)
+
+	require.False(t, querier.Next())
+}
+
 func TestBlockReset(t *testing.T) {
 	t.Parallel()
 	numSeries := 100
-	numKeysPerSeries := 10000
-	data, _ := MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 1, 0xffff, 0, 10000)
+	data, _ := MkBasicSeriesWithBlooms(numSeries, 1, 0xffff, 0, 10000)
 
 	indexBuf := bytes.NewBuffer(nil)
 	bloomsBuf := bytes.NewBuffer(nil)
@@ -292,7 +413,7 @@ func TestBlockReset(t *testing.T) {
 	)
 	require.Nil(t, err)
 
-	itr := NewSliceIter[SeriesWithBloom](data)
+	itr := NewSliceIter[SeriesWithBlooms](data)
 	_, err = builder.BuildFrom(itr)
 	require.Nil(t, err)
 	block := NewBlock(reader, NewMetrics(nil))
@@ -320,11 +441,10 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
 	t.Parallel()
 
 	numSeries := 100
-	numKeysPerSeries := 100
 	minTs, maxTs := model.Time(0), model.Time(10000)
-	xs, _ := MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, minTs, maxTs)
+	xs, _ := MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, minTs, maxTs)
 
-	var data [][]*SeriesWithBloom
+	var data [][]*SeriesWithBlooms
 
 	// First, we build the blocks
 
@@ -358,15 +478,15 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
 		require.Nil(t, err)
 		// each set of copies gets a different slice of the data
 		minIdx, maxIdx := i*len(xs)/len(sets), (i+1)*len(xs)/len(sets)
-		itr := NewSliceIter[SeriesWithBloom](xs[minIdx:maxIdx])
+		itr := NewSliceIter[SeriesWithBlooms](xs[minIdx:maxIdx])
 		_, err = builder.BuildFrom(itr)
 		require.Nil(t, err)
 		block := NewBlock(reader, NewMetrics(nil))
-		querier := NewBlockQuerier(block, false, DefaultMaxPageSize)
+		querier := NewBlockQuerier(block, false, DefaultMaxPageSize).Iter()
 
 		// rather than use the block querier directly, collect its data
 		// so we can use it in a few places later
-		var tmp []*SeriesWithBloom
+		var tmp []*SeriesWithBlooms
 		for querier.Next() {
 			tmp = append(tmp, querier.At())
 		}
@@ -376,31 +496,43 @@ 
func TestMergeBuilder_Roundtrip(t *testing.T) { // we keep 2 copies of the data as iterators. One for the blocks, and one for the "store" // which will force it to reference the same series - var blocks []PeekingIterator[*SeriesWithBloom] - var store []PeekingIterator[*SeriesWithBloom] + var blocks []PeekingIterator[*SeriesWithBlooms] + var store []PeekingIterator[*SeriesWithBlooms] for _, x := range data { - blocks = append(blocks, NewPeekingIter[*SeriesWithBloom](NewSliceIter[*SeriesWithBloom](x))) - store = append(store, NewPeekingIter[*SeriesWithBloom](NewSliceIter[*SeriesWithBloom](x))) + blocks = append(blocks, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](x))) + store = append(store, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](x))) } orderedStore := NewHeapIterForSeriesWithBloom(store...) - dedupedStore := NewDedupingIter[*SeriesWithBloom, *Series]( - func(a *SeriesWithBloom, b *Series) bool { + dedupedStore := NewDedupingIter[*SeriesWithBlooms, *Series]( + func(a *SeriesWithBlooms, b *Series) bool { return a.Series.Fingerprint == b.Fingerprint }, - func(swb *SeriesWithBloom) *Series { + func(swb *SeriesWithBlooms) *Series { return swb.Series }, - func(a *SeriesWithBloom, b *Series) *Series { + func(a *SeriesWithBlooms, b *Series) *Series { if len(a.Series.Chunks) > len(b.Chunks) { return a.Series } return b }, - NewPeekingIter[*SeriesWithBloom](orderedStore), + NewPeekingIter[*SeriesWithBlooms](orderedStore), ) + // We're not testing the ability to extend a bloom in this test + pop := func(s *Series, srcBlooms SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { + for srcBlooms.Next() { + bloom := srcBlooms.At() + ch <- &BloomCreation{ + Bloom: bloom, + SourceBytesAdded: int(bloom.Capacity()) / 8, + } + } + close(ch) + } + // build the new block from the old ones indexBuf, bloomBuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) writer := NewMemoryBlockWriter(indexBuf, bloomBuf) @@ -408,10 +540,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) { mb := NewMergeBuilder( dedupedBlocks(blocks), dedupedStore, - func(s *Series, b *Bloom) (int, bool, error) { - // We're not actually indexing new data in this test - return 0, false, nil - }, + pop, NewMetrics(nil), ) builder, err := NewBlockBuilder(blockOpts, writer) @@ -419,19 +548,19 @@ func TestMergeBuilder_Roundtrip(t *testing.T) { checksum, _, err := mb.Build(builder) require.Nil(t, err) - require.Equal(t, uint32(0xc7b4210b), checksum) + require.Equal(t, uint32(0x2a6cdba6), checksum) // ensure the new block contains one copy of all the data // by comparing it against an iterator over the source data mergedBlockQuerier := NewBlockQuerier(NewBlock(reader, NewMetrics(nil)), false, DefaultMaxPageSize) - sourceItr := NewSliceIter[*SeriesWithBloom](PointerSlice[SeriesWithBloom](xs)) + sourceItr := NewSliceIter[*SeriesWithBlooms](PointerSlice[SeriesWithBlooms](xs)) - EqualIterators[*SeriesWithBloom]( + EqualIterators[*SeriesWithBlooms]( t, - func(a, b *SeriesWithBloom) { + func(a, b *SeriesWithBlooms) { require.Equal(t, a.Series.Fingerprint, b.Series.Fingerprint) }, sourceItr, - mergedBlockQuerier, + mergedBlockQuerier.Iter(), ) } diff --git a/pkg/storage/bloom/v1/dedupe_test.go b/pkg/storage/bloom/v1/dedupe_test.go index 7e12f25247036..e008bee6834c1 100644 --- a/pkg/storage/bloom/v1/dedupe_test.go +++ b/pkg/storage/bloom/v1/dedupe_test.go @@ -9,29 +9,28 @@ import ( func TestMergeDedupeIter(t *testing.T) { t.Parallel() var ( - numSeries = 100 - numKeysPerSeries = 10000 - data, _ = 
MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, 0, 10000)
-		dataPtr          = PointerSlice(data)
-		queriers         = make([]PeekingIterator[*SeriesWithBloom], 4)
+		numSeries = 100
+		data, _   = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000)
+		dataPtr   = PointerSlice(data)
+		queriers  = make([]PeekingIterator[*SeriesWithBlooms], 4)
 	)
 
 	for i := 0; i < len(queriers); i++ {
-		queriers[i] = NewPeekingIter[*SeriesWithBloom](NewSliceIter[*SeriesWithBloom](dataPtr))
+		queriers[i] = NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](dataPtr))
 	}
 
 	mbq := NewHeapIterForSeriesWithBloom(queriers...)
 
-	eq := func(a, b *SeriesWithBloom) bool {
+	eq := func(a, b *SeriesWithBlooms) bool {
 		return a.Series.Fingerprint == b.Series.Fingerprint
 	}
-	merge := func(a, _ *SeriesWithBloom) *SeriesWithBloom {
+	merge := func(a, _ *SeriesWithBlooms) *SeriesWithBlooms {
 		return a
 	}
-	deduper := NewDedupingIter[*SeriesWithBloom, *SeriesWithBloom](
+	deduper := NewDedupingIter[*SeriesWithBlooms, *SeriesWithBlooms](
 		eq,
-		Identity[*SeriesWithBloom],
+		Identity[*SeriesWithBlooms],
 		merge,
-		NewPeekingIter[*SeriesWithBloom](mbq),
+		NewPeekingIter[*SeriesWithBlooms](mbq),
 	)
 
 	for i := 0; i < len(data); i++ {
diff --git a/pkg/storage/bloom/v1/filter/scalable.go b/pkg/storage/bloom/v1/filter/scalable.go
index a7848a2dd62ca..2be2348f9ff5e 100644
--- a/pkg/storage/bloom/v1/filter/scalable.go
+++ b/pkg/storage/bloom/v1/filter/scalable.go
@@ -139,6 +139,14 @@ func (s *ScalableBloomFilter) Test(data []byte) bool {
 // Add will add the data to the Bloom filter. It returns the filter to allow
 // for chaining.
 func (s *ScalableBloomFilter) Add(data []byte) Filter {
+	s.AddWithMaxSize(data, 0)
+	return s
+}
+
+// AddWithMaxSize adds a new element to the filter,
+// unless adding would require the filter to grow above a given maxSize (0 for unlimited).
+// It returns true if the filter is full, in which case the key was not added.
+func (s *ScalableBloomFilter) AddWithMaxSize(data []byte, maxSize int) (full bool) {
 	idx := len(s.filters) - 1
 
 	// If the last filter has reached its fill ratio, add a new one.
@@ -161,6 +169,10 @@ func (s *ScalableBloomFilter) Add(data []byte) Filter {
 	// calculate the actual fill ratio & update the estimated count for the filter. If the actual fill ratio
 	// is above the target fill ratio, add a new filter.
 	if ratio := s.filters[idx].UpdateCount(); ratio >= s.p {
+		nextCap, _ := s.nextFilterCapacity()
+		if maxSize > 0 && s.Capacity()+nextCap > uint(maxSize) {
+			return true
+		}
 		s.addFilter()
 		idx++
 	}
@@ -169,7 +181,7 @@ func (s *ScalableBloomFilter) Add(data []byte) Filter {
 	s.filters[idx].Add(data)
 	s.additionsSinceFillRatioCheck++
 
-	return s
+	return false
 }
 
 // TestAndAdd is equivalent to calling Test followed by Add. It returns true if
@@ -180,11 +192,12 @@ func (s *ScalableBloomFilter) TestAndAdd(data []byte) bool {
 	return member
 }
 
-// HeavyAdd adds a new element to the filter and returns a few metrics (the "heavy" part)
-func (s *ScalableBloomFilter) HeavyAdd(data []byte) (noop bool, bloomSize int) {
-	noop = s.TestAndAdd(data)
-	sz := s.Capacity() / 8 // convert bits to bytes
-	return noop, int(sz)
+// TestAndAddWithMaxSize is equivalent to calling Test followed by AddWithMaxSize. It returns both whether
+// the key already exists in the filter and whether the filter is full. If full, the key was _not_ added.
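+// e.g. the bloom tokenizer passes its byte budget converted to bits:
+//
+//	exists, full := sbf.TestAndAddWithMaxSize(tok, maxBloomSize*eightBits)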
+func (s *ScalableBloomFilter) TestAndAddWithMaxSize(data []byte, maxSize int) (exists, full bool) { + member := s.Test(data) + full = s.AddWithMaxSize(data, maxSize) + return member, full } // Reset restores the Bloom filter to its original state. It returns the filter @@ -195,20 +208,25 @@ func (s *ScalableBloomFilter) Reset() *ScalableBloomFilter { return s } -// addFilter adds a new Bloom filter with a restricted false-positive rate to -// the Scalable Bloom Filter -func (s *ScalableBloomFilter) addFilter() { - fpRate := s.fp * math.Pow(s.r, float64(len(s.filters))) - var p *PartitionedBloomFilter +func (s *ScalableBloomFilter) nextFilterCapacity() (m uint, fpRate float64) { + fpRate = s.fp * math.Pow(s.r, float64(len(s.filters))) // first filter is created with a size determined by the hint. // successive filters are created with a size determined by the // previous filter's capacity and the space growth factor. if len(s.filters) == 0 { - p = NewPartitionedBloomFilter(s.hint, fpRate) + m = OptimalM(s.hint, fpRate) } else { - p = NewPartitionedBloomFilterWithCapacity(s.filters[len(s.filters)-1].Capacity()*s.s, fpRate) + m = s.filters[len(s.filters)-1].Capacity() * s.s } + return +} + +// addFilter adds a new Bloom filter with a restricted false-positive rate to +// the Scalable Bloom Filter +func (s *ScalableBloomFilter) addFilter() { + nextCap, fpRate := s.nextFilterCapacity() + p := NewPartitionedBloomFilterWithCapacity(nextCap, fpRate) if len(s.filters) > 0 { p.SetHash(s.filters[0].hash) diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go index ed920072b8ca0..435299d129717 100644 --- a/pkg/storage/bloom/v1/fuse.go +++ b/pkg/storage/bloom/v1/fuse.go @@ -228,74 +228,145 @@ func (fq *FusedQuerier) Run() error { return errors.Wrap(err, "seeking to fingerprint") } - if !fq.bq.series.Next() { + if !fq.bq.Next() { // no more series, we're done since we're iterating desired fingerprints in order return nil } - series := fq.bq.series.At() + series := fq.bq.At() if series.Fingerprint != fp { // fingerprint not found, can't remove chunks - level.Debug(fq.logger).Log("msg", "fingerprint not found", "fp", series.Fingerprint, "err", fq.bq.series.Err()) + level.Debug(fq.logger).Log("msg", "fingerprint not found", "fp", series.Fingerprint, "err", fq.bq.Err()) fq.recordMissingFp(nextBatch, fp) continue } - // Now that we've found the series, we need to find the unpack the bloom - skip := fq.bq.blooms.LoadOffset(series.Offset) + fq.runSeries(schema, series, nextBatch) + } + + return nil +} + +func (fq *FusedQuerier) runSeries(schema Schema, series *SeriesWithOffsets, reqs []Request) { + // For a given chunk|series to be removed, it must fail to match all blooms. + // Because iterating/loading blooms can be expensive, we iterate blooms one at a time, collecting + // the removals (failures) for each (bloom, chunk) pair. + // At the end, we intersect the removals for each series to determine if it should be removed + + type inputChunks struct { + Missing ChunkRefs // chunks that do not exist in the blooms and cannot be queried + InBlooms ChunkRefs // chunks which do exist in the blooms and can be queried + + found map[int]bool // map of the index in `InBlooms` to whether the chunk + // was found in _any_ of the blooms for the series. In order to + // be eligible for removal, a chunk must be found in _no_ blooms. 
+ } + + inputs := make([]inputChunks, 0, len(reqs)) + for _, req := range reqs { + missing, inBlooms := req.Chks.Compare(series.Chunks, true) + inputs = append(inputs, inputChunks{ + Missing: missing, + InBlooms: inBlooms, + + found: make(map[int]bool, len(inBlooms)), + }) + } + + for i, offset := range series.Offsets { + skip := fq.bq.blooms.LoadOffset(offset) if skip { // could not seek to the desired bloom, // likely because the page was too large to load - fq.recordSkippedFp(nextBatch, fp) - continue + // NB(owen-d): since all blooms must be tested to guarantee result correctness, + // we do not filter any chunks|series + fq.recordSkippedFp(reqs, series.Fingerprint) + return } if !fq.bq.blooms.Next() { - // fingerprint not found, can't remove chunks - level.Debug(fq.logger).Log("msg", "fingerprint not found", "fp", series.Fingerprint, "err", fq.bq.blooms.Err()) - fq.recordMissingFp(nextBatch, fp) - continue + // bloom not found, can't remove chunks + level.Debug(fq.logger).Log( + "msg", "bloom not found", + "fp", series.Fingerprint, + "err", fq.bq.blooms.Err(), + "i", i, + ) + fq.recordMissingFp(reqs, series.Fingerprint) + return } + // Test each bloom individually bloom := fq.bq.blooms.At() - // test every input against this chunk - for _, input := range nextBatch { - missing, inBlooms := input.Chks.Compare(series.Chunks, true) + for j, req := range reqs { + + // shortcut: series level removal + // we can skip testing chunk keys individually if the bloom doesn't match + // the query. + if !req.Search.Matches(bloom) { + // Nothing else needs to be done for this (bloom, request); + // check the next input request + continue + } + // TODO(owen-d): copying this over, but they're going to be the same + // across any block schema because prefix len is determined by n-gram and + // all chunks have the same encoding length. 
tl;dr: it's weird/unnecessary to have + // these defined this way and recreated across each bloom var ( - // TODO(owen-d): pool - removals ChunkRefs - // TODO(salvacorts): pool tokenBuf tokenBuf []byte prefixLen int ) + for k, chk := range inputs[j].InBlooms { + // if we've already found this chunk in a previous bloom, skip testing it + if inputs[j].found[k] { + continue + } - // First, see if the search passes the series level bloom before checking for chunks individually - if matchedSeries := input.Search.Matches(bloom); !matchedSeries { - removals = inBlooms - } else { - for _, chk := range inBlooms { - // Get buf to concatenate the chunk and search token - tokenBuf, prefixLen = prefixedToken(schema.NGramLen(), chk, tokenBuf) - if !input.Search.MatchesWithPrefixBuf(bloom, tokenBuf, prefixLen) { - removals = append(removals, chk) - } + // Get buf to concatenate the chunk and search token + tokenBuf, prefixLen = prefixedToken(schema.NGramLen(), chk, tokenBuf) + if matched := req.Search.MatchesWithPrefixBuf(bloom, tokenBuf, prefixLen); matched { + inputs[j].found[k] = true } } - input.Recorder.record( - 1, len(inBlooms), // found - 0, 0, // skipped - 0, len(missing), // missed - len(removals), // filtered - ) - input.Response <- Output{ - Fp: fp, - Removals: removals, - } } } - return nil + for i, req := range reqs { + + removals := removalsFor(inputs[i].InBlooms, inputs[i].found) + + req.Recorder.record( + 1, len(inputs[i].InBlooms), // found + 0, 0, // skipped + 0, len(inputs[i].Missing), // missed + len(removals), // filtered + ) + req.Response <- Output{ + Fp: series.Fingerprint, + Removals: removals, + } + } +} + +func removalsFor(chunks ChunkRefs, found map[int]bool) ChunkRefs { + // shortcut: all chunks removed + if len(found) == 0 { + return chunks + } + + // shortcut: no chunks removed + if len(found) == len(chunks) { + return nil + } + + removals := make(ChunkRefs, 0, len(chunks)) + for i, chk := range chunks { + if !found[i] { + removals = append(removals, chk) + } + } + return removals } diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go index a0dc23001e939..7f11eece4c239 100644 --- a/pkg/storage/bloom/v1/fuse_test.go +++ b/pkg/storage/bloom/v1/fuse_test.go @@ -45,19 +45,7 @@ func TestFusedQuerier(t *testing.T) { writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) reader := NewByteReader(indexBuf, bloomsBuf) numSeries := 1000 - data, keys := MkBasicSeriesWithBlooms(numSeries, 0, 0x0000, 0xffff, 0, 10000) - - // Make the first and third series blooms too big to fit into a single page so we skip them while reading - for i := 0; i < 10000; i++ { - tokenizer := NewNGramTokenizer(4, 0) - line := fmt.Sprintf("%04x:%04x", i, i+1) - it := tokenizer.Tokens(line) - for it.Next() { - key := it.At() - data[0].Bloom.Add(key) - data[2].Bloom.Add(key) - } - } + data, keys := MkBasicSeriesWithBlooms(numSeries, 0x0000, 0xffff, 0, 10000) builder, err := NewBlockBuilder( BlockOptions{ @@ -71,14 +59,14 @@ func TestFusedQuerier(t *testing.T) { writer, ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBloom](data) + itr := NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.NoError(t, err) require.False(t, itr.Next()) block := NewBlock(reader, NewMetrics(nil)) querier := NewBlockQuerier(block, true, DefaultMaxPageSize) - n := 2 + n := 500 // series per request nReqs := numSeries / n var inputs [][]Request var resChans []chan Output @@ -146,6 +134,118 @@ func TestFusedQuerier(t *testing.T) { } } +// Successfully query series across multiple 
pages as well as series that only occupy 1 bloom +func TestFuseMultiPage(t *testing.T) { + indexBuf := bytes.NewBuffer(nil) + bloomsBuf := bytes.NewBuffer(nil) + writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) + reader := NewByteReader(indexBuf, bloomsBuf) + + builder, err := NewBlockBuilder( + BlockOptions{ + Schema: Schema{ + version: DefaultSchemaVersion, + encoding: chunkenc.EncSnappy, + nGramLength: 3, // we test trigrams + nGramSkip: 0, + }, + SeriesPageSize: 100, + BloomPageSize: 10, // So we force one bloom per page + }, + writer, + ) + require.Nil(t, err) + + fp := model.Fingerprint(1) + chk := ChunkRef{ + From: 0, + Through: 10, + Checksum: 0, + } + series := &Series{ + Fingerprint: fp, + Chunks: []ChunkRef{chk}, + } + + buf, prefixLn := prefixedToken(3, chk, nil) + + b1 := &Bloom{ + *filter.NewScalableBloomFilter(1024, 0.01, 0.8), + } + key1, key2 := []byte("foo"), []byte("bar") + b1.Add(key1) + b1.Add(append(buf[:prefixLn], key1...)) + + b2 := &Bloom{ + *filter.NewScalableBloomFilter(1024, 0.01, 0.8), + } + b2.Add(key2) + b2.Add(append(buf[:prefixLn], key2...)) + + _, err = builder.BuildFrom(NewSliceIter([]SeriesWithBlooms{ + { + series, + NewSliceIter([]*Bloom{ + b1, b2, + }), + }, + })) + require.NoError(t, err) + + block := NewBlock(reader, NewMetrics(nil)) + + querier := NewBlockQuerier(block, true, 100<<20) // 100MB too large to interfere + + keys := [][]byte{ + key1, // found in the first bloom + key2, // found in the second bloom + []byte("not"), // not found in any bloom + } + + chans := make([]chan Output, len(keys)) + for i := range chans { + chans[i] = make(chan Output, 1) // buffered once to not block in test + } + + req := func(ngram []byte, ch chan Output) Request { + return Request{ + Fp: fp, + Chks: []ChunkRef{chk}, + Search: stringTest{ + ngrams: [][]byte{ngram}, + }, + Response: ch, + Recorder: NewBloomRecorder(context.Background(), "unknown"), + } + } + var reqs []Request + for i, key := range keys { + reqs = append(reqs, req(key, chans[i])) + } + + fused := querier.Fuse( + []PeekingIterator[Request]{ + NewPeekingIter(NewSliceIter(reqs)), + }, + log.NewNopLogger(), + ) + + require.NoError(t, fused.Run()) + + // assume they're returned in order + for i := range reqs { + out := <-chans[i] + + // the last check doesn't match + if i == len(keys)-1 { + require.Equal(t, ChunkRefs{chk}, out.Removals) + continue + } + require.Equal(t, ChunkRefs(nil), out.Removals, "on index %d and key %s", i, string(keys[i])) + } + +} + func TestLazyBloomIter_Seek_ResetError(t *testing.T) { // references for linking in memory reader+writer indexBuf := bytes.NewBuffer(nil) @@ -158,7 +258,7 @@ func TestLazyBloomIter_Seek_ResetError(t *testing.T) { } numSeries := 4 - data := make([]SeriesWithBloom, 0, numSeries) + data := make([]SeriesWithBlooms, 0, numSeries) tokenizer := NewNGramTokenizer(4, 0) for i := 0; i < numSeries; i++ { var series Series @@ -191,9 +291,9 @@ func TestLazyBloomIter_Seek_ResetError(t *testing.T) { } } - data = append(data, SeriesWithBloom{ + data = append(data, SeriesWithBlooms{ Series: &series, - Bloom: &bloom, + Blooms: NewSliceIter([]*Bloom{&bloom}), }) } @@ -209,33 +309,43 @@ func TestLazyBloomIter_Seek_ResetError(t *testing.T) { writer, ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBloom](data) + itr := NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.NoError(t, err) require.False(t, itr.Next()) block := NewBlock(reader, NewMetrics(nil)) - querier := NewBlockQuerier(block, true, 1000) + smallMaxPageSize := 1000 // 
deliberately trigger page skipping for tests + querier := NewBlockQuerier(block, true, smallMaxPageSize) for fp := model.Fingerprint(0); fp < model.Fingerprint(numSeries); fp++ { err := querier.Seek(fp) require.NoError(t, err) - require.True(t, querier.series.Next()) - series := querier.series.At() + require.True(t, querier.Next()) + series := querier.At() + + // earlier test only has 1 bloom offset per series + require.Equal(t, 1, len(series.Offsets)) require.Equal(t, fp, series.Fingerprint) + // seekable := true - if largeSeries(int(fp)) { + if large := largeSeries(int(fp)); large { seekable = false } + if !seekable { - require.True(t, querier.blooms.LoadOffset(series.Offset)) + require.True(t, querier.blooms.LoadOffset(series.Offsets[0])) continue } - require.False(t, querier.blooms.LoadOffset(series.Offset)) - require.True(t, querier.blooms.Next()) - require.NoError(t, querier.blooms.Err()) + + for _, offset := range series.Offsets { + require.False(t, querier.blooms.LoadOffset(offset)) + require.True(t, querier.blooms.Next()) + require.NoError(t, querier.blooms.Err()) + } + } } @@ -245,8 +355,7 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Ou writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) reader := NewByteReader(indexBuf, bloomsBuf) numSeries := 10000 - numKeysPerSeries := 100 - data, _ := MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffffff, 0, 10000) + data, _ := MkBasicSeriesWithBlooms(numSeries, 0, 0xffffff, 0, 10000) builder, err := NewBlockBuilder( BlockOptions{ @@ -260,7 +369,7 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Ou writer, ) require.Nil(b, err) - itr := NewSliceIter[SeriesWithBloom](data) + itr := NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.Nil(b, err) block := NewBlock(reader, NewMetrics(nil)) diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go index caadfa26ddf74..b11927ed303b2 100644 --- a/pkg/storage/bloom/v1/index.go +++ b/pkg/storage/bloom/v1/index.go @@ -15,7 +15,7 @@ import ( ) type Schema struct { - version byte + version Version encoding chunkenc.Encoding nGramLength, nGramSkip uint64 } @@ -53,7 +53,7 @@ func (s *Schema) CompressorPool() chunkenc.WriterPool { func (s *Schema) Encode(enc *encoding.Encbuf) { enc.Reset() enc.PutBE32(magicNumber) - enc.PutByte(s.version) + enc.PutByte(byte(s.version)) enc.PutByte(byte(s.encoding)) enc.PutBE64(s.nGramLength) enc.PutBE64(s.nGramSkip) @@ -77,8 +77,8 @@ func (s *Schema) Decode(dec *encoding.Decbuf) error { if number != magicNumber { return errors.Errorf("invalid magic number. expected %x, got %x", magicNumber, number) } - s.version = dec.Byte() - if s.version != 1 { + s.version = Version(dec.Byte()) + if s.version != 1 && s.version != 2 { return errors.Errorf("invalid version. 
expected %d or %d, got %d", V1, V2, s.version)
 	}
 
@@ -190,6 +190,7 @@ func (b *BlockIndex) NewSeriesPageDecoder(r io.ReadSeeker, header SeriesPageHead
 	}
 
 	res = &SeriesPageDecoder{
+		schema: b.opts.Schema,
 		data:   decompressed,
 		header: header.SeriesHeader,
 	}
@@ -267,13 +268,14 @@ func (h *SeriesHeader) Decode(dec *encoding.Decbuf) error {
 // can decode a series page one item at a time, useful when we don't
 // need to iterate an entire page
 type SeriesPageDecoder struct {
+	schema Schema
 	data   []byte
 	dec    encoding.Decbuf
 	header SeriesHeader
 
 	// state
 	i              int // current index
-	cur            *SeriesWithOffset
+	cur            *SeriesWithOffsets
 	err            error
 	previousFp     model.Fingerprint // previous series' fingerprint for delta-decoding
 	previousOffset BloomOffset       // previous series' bloom offset for delta-decoding
@@ -298,8 +300,8 @@ func (d *SeriesPageDecoder) Next() bool {
 		return false
 	}
 
-	var res SeriesWithOffset
-	d.previousFp, d.previousOffset, d.err = res.Decode(&d.dec, d.previousFp, d.previousOffset)
+	var res SeriesWithOffsets
+	d.previousFp, d.previousOffset, d.err = res.Decode(d.schema.version, &d.dec, d.previousFp, d.previousOffset)
 	if d.err != nil {
 		return false
 	}
@@ -348,7 +350,7 @@ func (d *SeriesPageDecoder) Seek(fp model.Fingerprint) {
 	}
 }
 
-func (d *SeriesPageDecoder) At() (res *SeriesWithOffset) {
+func (d *SeriesPageDecoder) At() (res *SeriesWithOffsets) {
 	return d.cur
 }
 
@@ -364,6 +366,103 @@ type Series struct {
 	Chunks      ChunkRefs
 }
 
+// SeriesWithOffsets is a series with a variable number
+// of bloom offsets. Used in v2+ to store blooms for larger series
+// in parts.
+type SeriesWithOffsets struct {
+	Offsets []BloomOffset
+	Series
+}
+
+func (s *SeriesWithOffsets) Encode(
+	enc *encoding.Encbuf,
+	previousFp model.Fingerprint,
+	previousOffset BloomOffset,
+) BloomOffset {
+	sort.Sort(s.Chunks) // ensure order
+	// delta encode fingerprint
+	enc.PutUvarint64(uint64(s.Fingerprint - previousFp))
+	// encode number of bloom offsets in this series
+	enc.PutUvarint(len(s.Offsets))
+
+	lastOffset := previousOffset
+	for _, offset := range s.Offsets {
+		// delta encode offsets.
+		// Multiple offsets per series is a v2+ feature with different encoding implementation,
+		// so we signal that to the encoder
+		offset.Encode(enc, V2, lastOffset)
+		lastOffset = offset
+	}
+
+	// encode chunks using delta encoded timestamps
+	var lastEnd model.Time
+	enc.PutUvarint(len(s.Chunks))
+	for _, chunk := range s.Chunks {
+		lastEnd = chunk.Encode(enc, lastEnd)
+	}
+
+	return lastOffset
+}
+
+func (s *SeriesWithOffsets) Decode(
+	version Version,
+	dec *encoding.Decbuf,
+	previousFp model.Fingerprint,
+	previousOffset BloomOffset,
+) (model.Fingerprint, BloomOffset, error) {
+	// Since *SeriesWithOffsets is still representable by the v1 schema as a len=1 offset group,
+	// we can decode it even though multiple offsets were introduced in v2
+	if version == V1 {
+		return s.decodeV1(dec, previousFp, previousOffset)
+	}
+
+	s.Fingerprint = previousFp + model.Fingerprint(dec.Uvarint64())
+	numOffsets := dec.Uvarint()
+
+	s.Offsets = make([]BloomOffset, numOffsets)
+	var (
+		err        error
+		lastEnd    model.Time
+		lastOffset = previousOffset
	)
+	for i := range s.Offsets {
+		// SeriesWithOffsets is a v2+ feature with multiple bloom offsets per series,
+		// so we signal that to the decoder
+		err = s.Offsets[i].Decode(dec, V2, lastOffset)
+		lastOffset = s.Offsets[i]
+		if err != nil {
+			return 0, BloomOffset{}, errors.Wrapf(err, "decoding %dth bloom offset", i)
+		}
+	}
+
+	// TODO(owen-d): use pool
+	s.Chunks = make([]ChunkRef, dec.Uvarint())
+	for i := range s.Chunks {
+		lastEnd, err = s.Chunks[i].Decode(dec, lastEnd)
+		if err != nil {
+			return 0, BloomOffset{}, errors.Wrapf(err, "decoding %dth chunk", i)
+		}
+	}
+	return s.Fingerprint, lastOffset, dec.Err()
+}
+
+// decodeV1 decodes a v2-compatible series from a v1 encoding
+func (s *SeriesWithOffsets) decodeV1(
+	dec *encoding.Decbuf,
+	previousFp model.Fingerprint,
+	previousOffset BloomOffset,
+) (model.Fingerprint, BloomOffset, error) {
+	var single SeriesWithOffset
+	fp, last, err := single.Decode(dec, previousFp, previousOffset)
+	if err != nil {
+		return 0, BloomOffset{}, errors.Wrap(err, "decoding series with offset")
+	}
+	s.Offsets = []BloomOffset{last}
+	s.Series = single.Series
+	return fp, last, nil
+}
+
+// Used in v1 schema
 type SeriesWithOffset struct {
 	Offset BloomOffset
 	Series
@@ -378,7 +477,9 @@ func (s *SeriesWithOffset) Encode(
 	// delta encode fingerprint
 	enc.PutBE64(uint64(s.Fingerprint - previousFp))
 	// delta encode offsets
-	s.Offset.Encode(enc, previousOffset)
+	// V1 only has 1 offset per series which has a legacy encoding scheme;
+	// we signal that to the encoder
+	s.Offset.Encode(enc, V1, previousOffset)
 
 	// encode chunks using delta encoded timestamps
 	var lastEnd model.Time
@@ -392,7 +493,9 @@ func (s *SeriesWithOffset) Encode(
 func (s *SeriesWithOffset) Decode(dec *encoding.Decbuf, previousFp model.Fingerprint, previousOffset BloomOffset) (model.Fingerprint, BloomOffset, error) {
 	s.Fingerprint = previousFp + model.Fingerprint(dec.Be64())
-	if err := s.Offset.Decode(dec, previousOffset); err != nil {
+	// V1 only has 1 offset per series which has a legacy encoding scheme;
+	// we signal that to the decoder
+	if err := s.Offset.Decode(dec, V1, previousOffset); err != nil {
 		return 0, BloomOffset{}, errors.Wrap(err, "decoding bloom offset")
 	}
 
@@ -457,14 +560,33 @@ type BloomOffset struct {
 	ByteOffset int // offset to beginning of bloom within page
 }
 
-func (o *BloomOffset) Encode(enc *encoding.Encbuf, previousOffset BloomOffset) {
+func (o *BloomOffset) Encode(enc *encoding.Encbuf, v Version, previousOffset BloomOffset) {
+	
// page offsets diffs are always ascending enc.PutUvarint(o.Page - previousOffset.Page) - enc.PutUvarint(o.ByteOffset - previousOffset.ByteOffset) + + switch v { + case V1: + // V1 uses UVarint for bloom offset deltas. This is fine because there is only 1 bloom per series in v1 + enc.PutUvarint(o.ByteOffset - previousOffset.ByteOffset) + default: + // V2 encodes multiple bloom offsets per series and successive blooms may belong to + // separate bloom pages. Therefore, we use Varint64 for byte offset deltas as + // byteOffsets will not be ascending when a new bloom page is written. + enc.PutVarint64(int64(o.ByteOffset - previousOffset.ByteOffset)) + } } -func (o *BloomOffset) Decode(dec *encoding.Decbuf, previousOffset BloomOffset) error { +func (o *BloomOffset) Decode(dec *encoding.Decbuf, v Version, previousOffset BloomOffset) error { o.Page = previousOffset.Page + dec.Uvarint() - o.ByteOffset = previousOffset.ByteOffset + dec.Uvarint() + + // Explained by the Encode method + switch v { + case V1: + o.ByteOffset = previousOffset.ByteOffset + dec.Uvarint() + default: + o.ByteOffset = previousOffset.ByteOffset + int(dec.Varint64()) + } + return dec.Err() } @@ -522,3 +644,38 @@ func (refs ChunkRefs) Compare(others ChunkRefs, populateInclusive bool) (exclusi return } + +func (refs ChunkRefs) Intersect(others ChunkRefs) ChunkRefs { + _, res := refs.Compare(others, true) + return res +} + +func (refs ChunkRefs) Union(others ChunkRefs) ChunkRefs { + var res ChunkRefs + var i, j int + for i < len(refs) && j < len(others) { + switch { + case refs[i] == others[j]: + res = append(res, refs[i]) + i++ + j++ + case refs[i].Less(others[j]): + res = append(res, refs[i]) + i++ + default: + res = append(res, others[j]) + j++ + } + } + + // append any remaining refs + if i < len(refs) { + res = append(res, refs[i:]...) + } + + if j < len(others) { + res = append(res, others[j:]...) 
+ } + + return res +} diff --git a/pkg/storage/bloom/v1/index_builder.go b/pkg/storage/bloom/v1/index_builder.go new file mode 100644 index 0000000000000..36c74a9d87ab1 --- /dev/null +++ b/pkg/storage/bloom/v1/index_builder.go @@ -0,0 +1,228 @@ +package v1 + +import ( + "fmt" + "io" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/grafana/loki/v3/pkg/util/encoding" +) + +type IndexBuilder struct { + opts BlockOptions + writer io.WriteCloser + + offset int // track the offset of the file + writtenSchema bool + pages []SeriesPageHeaderWithOffset + page PageWriter + scratch *encoding.Encbuf + + previousFp model.Fingerprint + previousOffset BloomOffset + fromFp model.Fingerprint + fromTs, throughTs model.Time +} + +func NewIndexBuilder(opts BlockOptions, writer io.WriteCloser) *IndexBuilder { + return &IndexBuilder{ + opts: opts, + writer: writer, + page: NewPageWriter(int(opts.SeriesPageSize)), + scratch: &encoding.Encbuf{}, + } +} + +func (b *IndexBuilder) WriteOpts() error { + b.scratch.Reset() + b.opts.Encode(b.scratch) + if _, err := b.writer.Write(b.scratch.Get()); err != nil { + return errors.Wrap(err, "writing opts+schema") + } + b.writtenSchema = true + b.offset += b.scratch.Len() + return nil +} + +func (b *IndexBuilder) AppendV2(series SeriesWithOffsets) error { + if !b.writtenSchema { + if err := b.WriteOpts(); err != nil { + return errors.Wrap(err, "appending series") + } + } + + b.scratch.Reset() + // we don't want to update the previous pointers yet in case + // we need to flush the page first which would + // be passed the incorrect final fp/offset + lastOffset := series.Encode(b.scratch, b.previousFp, b.previousOffset) + + if !b.page.SpaceFor(b.scratch.Len()) && b.page.Count() > 0 { + if err := b.flushPage(); err != nil { + return errors.Wrap(err, "flushing series page") + } + + // re-encode now that a new page has been cut and we use delta-encoding + b.scratch.Reset() + lastOffset = series.Encode(b.scratch, b.previousFp, b.previousOffset) + } + + switch { + case b.page.Count() == 0: + // Special case: this is the first series in a page + if len(series.Chunks) < 1 { + return fmt.Errorf("series with zero chunks for fingerprint %v", series.Fingerprint) + } + b.fromFp = series.Fingerprint + b.fromTs, b.throughTs = chkBounds(series.Chunks) + case b.previousFp > series.Fingerprint: + return fmt.Errorf("out of order series fingerprint for series %v", series.Fingerprint) + default: + from, through := chkBounds(series.Chunks) + if b.fromTs.After(from) { + b.fromTs = from + } + if b.throughTs.Before(through) { + b.throughTs = through + } + } + + _ = b.page.Add(b.scratch.Get()) + b.previousFp = series.Fingerprint + b.previousOffset = lastOffset + return nil +} + +func (b *IndexBuilder) AppendV1(series SeriesWithOffset) error { + if !b.writtenSchema { + if err := b.WriteOpts(); err != nil { + return errors.Wrap(err, "appending series") + } + } + + b.scratch.Reset() + // we don't want to update the previous pointers yet in case + // we need to flush the page first which would + // be passed the incorrect final fp/offset + previousFp, previousOffset := series.Encode(b.scratch, b.previousFp, b.previousOffset) + + if !b.page.SpaceFor(b.scratch.Len()) { + if err := b.flushPage(); err != nil { + return errors.Wrap(err, "flushing series page") + } + + // re-encode now that a new page has been cut and we use delta-encoding + b.scratch.Reset() + previousFp, previousOffset = series.Encode(b.scratch, b.previousFp, b.previousOffset) + } + b.previousFp = previousFp + 
b.previousOffset = previousOffset + + switch { + case b.page.Count() == 0: + // Special case: this is the first series in a page + if len(series.Chunks) < 1 { + return fmt.Errorf("series with zero chunks for fingerprint %v", series.Fingerprint) + } + b.fromFp = series.Fingerprint + b.fromTs, b.throughTs = chkBounds(series.Chunks) + case b.previousFp > series.Fingerprint: + return fmt.Errorf("out of order series fingerprint for series %v", series.Fingerprint) + default: + from, through := chkBounds(series.Chunks) + if b.fromTs.After(from) { + b.fromTs = from + } + if b.throughTs.Before(through) { + b.throughTs = through + } + } + + _ = b.page.Add(b.scratch.Get()) + b.previousFp = series.Fingerprint + b.previousOffset = series.Offset + return nil +} + +// must be > 1 +func chkBounds(chks []ChunkRef) (from, through model.Time) { + from, through = chks[0].From, chks[0].Through + for _, chk := range chks[1:] { + if chk.From.Before(from) { + from = chk.From + } + + if chk.Through.After(through) { + through = chk.Through + } + } + return +} + +func (b *IndexBuilder) flushPage() error { + crc32Hash := Crc32HashPool.Get() + defer Crc32HashPool.Put(crc32Hash) + + decompressedLen, compressedLen, err := b.page.writePage( + b.writer, + b.opts.Schema.CompressorPool(), + crc32Hash, + ) + if err != nil { + return errors.Wrap(err, "writing series page") + } + + header := SeriesPageHeaderWithOffset{ + Offset: b.offset, + Len: compressedLen, + DecompressedLen: decompressedLen, + SeriesHeader: SeriesHeader{ + NumSeries: b.page.Count(), + Bounds: NewBounds(b.fromFp, b.previousFp), + FromTs: b.fromTs, + ThroughTs: b.throughTs, + }, + } + + b.pages = append(b.pages, header) + b.offset += compressedLen + + b.fromFp = 0 + b.fromTs = 0 + b.throughTs = 0 + b.previousFp = 0 + b.previousOffset = BloomOffset{} + b.page.Reset() + + return nil +} + +func (b *IndexBuilder) Close() (uint32, error) { + if b.page.Count() > 0 { + if err := b.flushPage(); err != nil { + return 0, errors.Wrap(err, "flushing final series page") + } + } + + b.scratch.Reset() + b.scratch.PutUvarint(len(b.pages)) + for _, h := range b.pages { + h.Encode(b.scratch) + } + + // put offset to beginning of header section + // cannot be varint encoded because it's offset will be calculated as + // the 8 bytes prior to the checksum + b.scratch.PutBE64(uint64(b.offset)) + crc32Hash := Crc32HashPool.Get() + defer Crc32HashPool.Put(crc32Hash) + // wrap with final checksum + b.scratch.PutHash(crc32Hash) + _, err := b.writer.Write(b.scratch.Get()) + if err != nil { + return 0, errors.Wrap(err, "writing series page headers") + } + return crc32Hash.Sum32(), errors.Wrap(b.writer.Close(), "closing series writer") +} diff --git a/pkg/storage/bloom/v1/index_querier.go b/pkg/storage/bloom/v1/index_querier.go index 8ba984d3df31c..ef270551952ba 100644 --- a/pkg/storage/bloom/v1/index_querier.go +++ b/pkg/storage/bloom/v1/index_querier.go @@ -136,7 +136,7 @@ func (it *LazySeriesIter) next() bool { return false } -func (it *LazySeriesIter) At() *SeriesWithOffset { +func (it *LazySeriesIter) At() *SeriesWithOffsets { return it.curPage.At() } diff --git a/pkg/storage/bloom/v1/index_test.go b/pkg/storage/bloom/v1/index_test.go index 45d967fb11916..8b3a078bc0c46 100644 --- a/pkg/storage/bloom/v1/index_test.go +++ b/pkg/storage/bloom/v1/index_test.go @@ -9,20 +9,26 @@ import ( "github.com/grafana/loki/v3/pkg/util/encoding" ) +var SupportedVersions = []Version{V1, V2} + func TestBloomOffsetEncoding(t *testing.T) { - t.Parallel() - src := BloomOffset{Page: 1, ByteOffset: 2} - 
enc := &encoding.Encbuf{} - src.Encode(enc, BloomOffset{}) + for _, v := range SupportedVersions { + t.Run(v.String(), func(t *testing.T) { + src := BloomOffset{Page: 1, ByteOffset: 2} + enc := &encoding.Encbuf{} + src.Encode(enc, v, BloomOffset{}) - var dst BloomOffset - dec := encoding.DecWith(enc.Get()) - require.Nil(t, dst.Decode(&dec, BloomOffset{})) + var dst BloomOffset + dec := encoding.DecWith(enc.Get()) + require.Nil(t, dst.Decode(&dec, v, BloomOffset{})) + + require.Equal(t, src, dst) + }) + } - require.Equal(t, src, dst) } -func TestSeriesEncoding(t *testing.T) { +func TestSeriesEncoding_V1(t *testing.T) { t.Parallel() src := SeriesWithOffset{ Series: Series{ @@ -55,6 +61,78 @@ func TestSeriesEncoding(t *testing.T) { require.Equal(t, src, dst) } +func TestSeriesEncoding_V2(t *testing.T) { + t.Parallel() + src := SeriesWithOffsets{ + Series: Series{ + Fingerprint: model.Fingerprint(1), + Chunks: []ChunkRef{ + { + From: 1, + Through: 2, + Checksum: 3, + }, + { + From: 4, + Through: 5, + Checksum: 6, + }, + }, + }, + Offsets: []BloomOffset{ + {Page: 0, ByteOffset: 0}, + {Page: 0, ByteOffset: 100}, + {Page: 1, ByteOffset: 2}, + {Page: 2, ByteOffset: 1}, + }, + } + + enc := &encoding.Encbuf{} + src.Encode(enc, 0, BloomOffset{}) + + dec := encoding.DecWith(enc.Get()) + var dst SeriesWithOffsets + fp, offset, err := dst.Decode(V2, &dec, 0, BloomOffset{}) + require.Nil(t, err) + require.Equal(t, src.Fingerprint, fp) + require.Equal(t, src.Offsets[len(src.Offsets)-1], offset) + require.Equal(t, src, dst) +} + +func TestV2SeriesDecodesV1(t *testing.T) { + t.Parallel() + src := SeriesWithOffset{ + Series: Series{ + Fingerprint: model.Fingerprint(1), + Chunks: []ChunkRef{ + { + From: 1, + Through: 2, + Checksum: 3, + }, + { + From: 4, + Through: 5, + Checksum: 6, + }, + }, + }, + Offset: BloomOffset{Page: 1, ByteOffset: 2}, + } + + enc := &encoding.Encbuf{} + src.Encode(enc, 0, BloomOffset{}) + + dec := encoding.DecWith(enc.Get()) + var dst SeriesWithOffsets + fp, offset, err := dst.decodeV1(&dec, 0, BloomOffset{}) + require.Nil(t, err) + require.Equal(t, src.Fingerprint, fp) + require.Equal(t, src.Offset, offset) + require.Equal(t, []BloomOffset{src.Offset}, dst.Offsets) + require.Equal(t, src.Series, dst.Series) +} + func TestChunkRefCmpLess(t *testing.T) { t.Parallel() for _, tc := range []struct { @@ -193,3 +271,131 @@ func TestChunkRefsCompare(t *testing.T) { }) } } + +func TestChunkRefsUnion(t *testing.T) { + t.Parallel() + for _, tc := range []struct { + desc string + left, right, union ChunkRefs + }{ + { + desc: "empty", + left: nil, + right: nil, + union: nil, + }, + { + desc: "left empty", + left: nil, + right: ChunkRefs{{From: 1, Through: 2}}, + union: ChunkRefs{{From: 1, Through: 2}}, + }, + { + desc: "right empty", + left: ChunkRefs{{From: 1, Through: 2}}, + right: nil, + union: ChunkRefs{{From: 1, Through: 2}}, + }, + { + desc: "left before right", + left: ChunkRefs{{From: 1, Through: 2}}, + right: ChunkRefs{{From: 3, Through: 4}}, + union: ChunkRefs{{From: 1, Through: 2}, {From: 3, Through: 4}}, + }, + { + desc: "left after right", + left: ChunkRefs{{From: 3, Through: 4}}, + right: ChunkRefs{{From: 1, Through: 2}}, + union: ChunkRefs{{From: 1, Through: 2}, {From: 3, Through: 4}}, + }, + { + desc: "left overlaps right", + left: ChunkRefs{ + {From: 1, Through: 3}, + {From: 2, Through: 4}, + {From: 3, Through: 5}, + {From: 4, Through: 6}, + {From: 5, Through: 7}, + }, + right: ChunkRefs{ + {From: 2, Through: 4}, + {From: 4, Through: 6}, + {From: 5, Through: 6}, // not in left + 
}, + union: ChunkRefs{ + {From: 1, Through: 3}, + {From: 2, Through: 4}, + {From: 3, Through: 5}, + {From: 4, Through: 6}, + {From: 5, Through: 6}, + {From: 5, Through: 7}, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + require.Equal(t, tc.union, tc.left.Union(tc.right)) + }) + } +} + +func TestChunkRefsIntersect(t *testing.T) { + t.Parallel() + for _, tc := range []struct { + desc string + left, right, intersect ChunkRefs + }{ + { + desc: "empty", + left: nil, + right: nil, + intersect: nil, + }, + { + desc: "left empty", + left: nil, + right: ChunkRefs{{From: 1, Through: 2}}, + intersect: nil, + }, + { + desc: "right empty", + left: ChunkRefs{{From: 1, Through: 2}}, + right: nil, + intersect: nil, + }, + { + desc: "left before right", + left: ChunkRefs{{From: 1, Through: 2}}, + right: ChunkRefs{{From: 3, Through: 4}}, + intersect: nil, + }, + { + desc: "left after right", + left: ChunkRefs{{From: 3, Through: 4}}, + right: ChunkRefs{{From: 1, Through: 2}}, + intersect: nil, + }, + { + desc: "left overlaps right", + left: ChunkRefs{ + {From: 1, Through: 3}, + {From: 2, Through: 4}, + {From: 3, Through: 5}, + {From: 4, Through: 6}, + {From: 5, Through: 7}, + }, + right: ChunkRefs{ + {From: 2, Through: 4}, + {From: 4, Through: 6}, + {From: 5, Through: 6}, // not in left + }, + intersect: ChunkRefs{ + {From: 2, Through: 4}, + {From: 4, Through: 6}, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + require.Equal(t, tc.intersect, tc.left.Intersect(tc.right)) + }) + } +} diff --git a/pkg/storage/bloom/v1/merge.go b/pkg/storage/bloom/v1/merge.go index 981582f1234cd..d89ca2a7d7f96 100644 --- a/pkg/storage/bloom/v1/merge.go +++ b/pkg/storage/bloom/v1/merge.go @@ -13,9 +13,9 @@ type HeapIterator[T any] struct { ok bool } -func NewHeapIterForSeriesWithBloom(queriers ...PeekingIterator[*SeriesWithBloom]) *HeapIterator[*SeriesWithBloom] { +func NewHeapIterForSeriesWithBloom(queriers ...PeekingIterator[*SeriesWithBlooms]) *HeapIterator[*SeriesWithBlooms] { return NewHeapIterator( - func(a, b *SeriesWithBloom) bool { + func(a, b *SeriesWithBlooms) bool { return a.Series.Fingerprint < b.Series.Fingerprint }, queriers..., diff --git a/pkg/storage/bloom/v1/merge_test.go b/pkg/storage/bloom/v1/merge_test.go index 545ff2dc168d3..259888ae064d0 100644 --- a/pkg/storage/bloom/v1/merge_test.go +++ b/pkg/storage/bloom/v1/merge_test.go @@ -9,18 +9,17 @@ import ( func TestMergeBlockQuerier_NonOverlapping(t *testing.T) { t.Parallel() var ( - numSeries = 100 - numKeysPerSeries = 10000 - numQueriers = 4 - queriers []PeekingIterator[*SeriesWithBloom] - data, _ = MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, 0, 10000) + numSeries = 100 + numQueriers = 4 + queriers []PeekingIterator[*SeriesWithBlooms] + data, _ = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) ) for i := 0; i < numQueriers; i++ { - var ptrs []*SeriesWithBloom + var ptrs []*SeriesWithBlooms for j := 0; j < numSeries/numQueriers; j++ { ptrs = append(ptrs, &data[i*numSeries/numQueriers+j]) } - queriers = append(queriers, NewPeekingIter[*SeriesWithBloom](NewSliceIter[*SeriesWithBloom](ptrs))) + queriers = append(queriers, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](ptrs))) } mbq := NewHeapIterForSeriesWithBloom(queriers...) 
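Editor's note: the merge tests in this file exercise the heap-merge plus dedupe pattern (`NewHeapIterForSeriesWithBloom` feeding `NewDedupingIter`). The standalone sketch below is toy code, not Loki's iterator API: it k-way merges sorted inputs with `container/heap` and collapses equal neighbors, which is the same shape as merging block queriers ordered by fingerprint and deduping identical series.

```go
package main

import (
	"container/heap"
	"fmt"
)

// cursor is one sorted input stream with a read position.
type cursor struct {
	xs []int
	i  int
}

// mergeHeap orders cursors by their current head element, mirroring how the
// heap iterator orders queriers by series fingerprint.
type mergeHeap []*cursor

func (h mergeHeap) Len() int           { return len(h) }
func (h mergeHeap) Less(i, j int) bool { return h[i].xs[h[i].i] < h[j].xs[h[j].i] }
func (h mergeHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *mergeHeap) Push(x any)        { *h = append(*h, x.(*cursor)) }
func (h *mergeHeap) Pop() any {
	old := *h
	c := old[len(old)-1]
	*h = old[:len(old)-1]
	return c
}

// mergeDedupe k-way merges sorted inputs and collapses equal neighbors,
// the same shape as heap-merging block queriers and deduping by fingerprint.
func mergeDedupe(inputs ...[]int) []int {
	h := &mergeHeap{}
	for _, xs := range inputs {
		if len(xs) > 0 {
			*h = append(*h, &cursor{xs: xs})
		}
	}
	heap.Init(h)

	var out []int
	for h.Len() > 0 {
		c := (*h)[0]
		v := c.xs[c.i]
		if len(out) == 0 || out[len(out)-1] != v { // dedupe: keep first of an equal run
			out = append(out, v)
		}
		c.i++
		if c.i == len(c.xs) {
			heap.Pop(h) // this input is exhausted
		} else {
			heap.Fix(h, 0) // re-sift with the new head element
		}
	}
	return out
}

func main() {
	fmt.Println(mergeDedupe([]int{1, 3, 5}, []int{1, 2, 5}, []int{2, 4}))
	// Output: [1 2 3 4 5]
}
```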
@@ -37,18 +36,17 @@ func TestMergeBlockQuerier_NonOverlapping(t *testing.T) { func TestMergeBlockQuerier_Duplicate(t *testing.T) { t.Parallel() var ( - numSeries = 100 - numKeysPerSeries = 10000 - numQueriers = 2 - queriers []PeekingIterator[*SeriesWithBloom] - data, _ = MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, 0, 10000) + numSeries = 100 + numQueriers = 2 + queriers []PeekingIterator[*SeriesWithBlooms] + data, _ = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) ) for i := 0; i < numQueriers; i++ { queriers = append( queriers, - NewPeekingIter[*SeriesWithBloom]( - NewSliceIter[*SeriesWithBloom]( - PointerSlice[SeriesWithBloom](data), + NewPeekingIter[*SeriesWithBlooms]( + NewSliceIter[*SeriesWithBlooms]( + PointerSlice[SeriesWithBlooms](data), ), ), ) @@ -69,18 +67,17 @@ func TestMergeBlockQuerier_Overlapping(t *testing.T) { t.Parallel() var ( - numSeries = 100 - numKeysPerSeries = 10000 - numQueriers = 4 - queriers []PeekingIterator[*SeriesWithBloom] - data, _ = MkBasicSeriesWithBlooms(numSeries, numKeysPerSeries, 0, 0xffff, 0, 10000) - slices = make([][]*SeriesWithBloom, numQueriers) + numSeries = 100 + numQueriers = 4 + queriers []PeekingIterator[*SeriesWithBlooms] + data, _ = MkBasicSeriesWithBlooms(numSeries, 0, 0xffff, 0, 10000) + slices = make([][]*SeriesWithBlooms, numQueriers) ) for i := 0; i < numSeries; i++ { slices[i%numQueriers] = append(slices[i%numQueriers], &data[i]) } for i := 0; i < numQueriers; i++ { - queriers = append(queriers, NewPeekingIter[*SeriesWithBloom](NewSliceIter[*SeriesWithBloom](slices[i]))) + queriers = append(queriers, NewPeekingIter[*SeriesWithBlooms](NewSliceIter[*SeriesWithBlooms](slices[i]))) } mbq := NewHeapIterForSeriesWithBloom(queriers...) diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go index 4c6b4cee11326..cb94373185d8e 100644 --- a/pkg/storage/bloom/v1/metrics.go +++ b/pkg/storage/bloom/v1/metrics.go @@ -9,11 +9,10 @@ import ( type Metrics struct { // writes - bloomsTotal *prometheus.CounterVec // number of blooms created - sbfCreationTime *prometheus.CounterVec // time spent creating sbfs - bloomSize prometheus.Histogram // size of the bloom filter in bytes - hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter - estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter + bloomsTotal prometheus.Counter // number of blooms created + bloomSize prometheus.Histogram // size of the bloom filter in bytes + hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter + estimatedCount prometheus.Histogram // estimated number of elements in the bloom filter chunksIndexed *prometheus.CounterVec chunksPerSeries prometheus.Histogram blockSeriesIterated prometheus.Counter @@ -37,11 +36,9 @@ const ( chunkIndexedTypeIterated = "iterated" chunkIndexedTypeCopied = "copied" - tokenTypeRaw = "raw" - tokenTypeChunkPrefixed = "chunk_prefixed" - collisionTypeFalse = "false" - collisionTypeTrue = "true" - collisionTypeCache = "cache" + collisionTypeFalse = "false" + collisionTypeTrue = "true" + collisionTypeCache = "cache" blockFlushReasonFull = "full" blockFlushReasonFinished = "finished" @@ -53,9 +50,6 @@ const ( skipReasonErr = "err" skipReasonOOB = "out_of_bounds" - bloomCreationTypeIndexed = "indexed" - bloomCreationTypeSkipped = "skipped" - recorderRequested = "requested" recorderFound = "found" recorderSkipped = 
"skipped" @@ -65,16 +59,11 @@ const ( func NewMetrics(r prometheus.Registerer) *Metrics { return &Metrics{ - bloomsTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + bloomsTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: constants.Loki, Name: "blooms_created_total", Help: "Number of blooms created", - }, []string{"type"}), - sbfCreationTime: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Namespace: constants.Loki, - Name: "bloom_creation_time_total", - Help: "Time spent creating scalable bloom filters", - }, []string{"type"}), + }), bloomSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ Namespace: constants.Loki, Name: "bloom_size", @@ -118,7 +107,7 @@ func NewMetrics(r prometheus.Registerer) *Metrics { Namespace: constants.Loki, Name: "bloom_inserts_total", Help: "Number of inserts into the bloom filter. collision type may be `false` (no collision), `cache` (found in token cache) or true (found in bloom filter). token_type may be either `raw` (the original ngram) or `chunk_prefixed` (the ngram with the chunk prefix)", - }, []string{"token_type", "collision"}), + }, []string{"collision"}), sourceBytesAdded: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: constants.Loki, Name: "bloom_source_bytes_added_total", diff --git a/pkg/storage/bloom/v1/test_util.go b/pkg/storage/bloom/v1/test_util.go index d3ac7e427ec51..9c1fb6047497b 100644 --- a/pkg/storage/bloom/v1/test_util.go +++ b/pkg/storage/bloom/v1/test_util.go @@ -15,14 +15,14 @@ import ( // TODO(owen-d): this should probably be in it's own testing-util package -func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (*Block, []SeriesWithBloom, [][][]byte) { +func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (*Block, []SeriesWithBlooms, [][][]byte) { // references for linking in memory reader+writer indexBuf := bytes.NewBuffer(nil) bloomsBuf := bytes.NewBuffer(nil) writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) reader := NewByteReader(indexBuf, bloomsBuf) numSeries := int(throughFp-fromFp) / nth - data, keys := MkBasicSeriesWithBlooms(numSeries, nth, fromFp, throughFp, fromTs, throughTs) + data, keys := MkBasicSeriesWithBlooms(numSeries, fromFp, throughFp, fromTs, throughTs) builder, err := NewBlockBuilder( BlockOptions{ @@ -38,16 +38,40 @@ func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromT writer, ) require.Nil(t, err) - itr := NewSliceIter[SeriesWithBloom](data) + itr := NewSliceIter[SeriesWithBlooms](data) _, err = builder.BuildFrom(itr) require.Nil(t, err) block := NewBlock(reader, NewMetrics(nil)) return block, data, keys } -func MkBasicSeriesWithBlooms(nSeries, _ int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithBloom, keysList [][][]byte) { +// This is a helper type used in tests that buffers blooms and can be turned into +// the commonly used iterator form *SeriesWithBlooms. 
+type SeriesWithLiteralBlooms struct {
+	Series *Series
+	Blooms []*Bloom
+}
+
+func (s *SeriesWithLiteralBlooms) SeriesWithBlooms() SeriesWithBlooms {
+	return SeriesWithBlooms{
+		Series: s.Series,
+		Blooms: NewSliceIter[*Bloom](s.Blooms),
+	}
+}
+
+func MkBasicSeriesWithBlooms(nSeries int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithBlooms, keysList [][][]byte) {
+	series, keys := MkBasicSeriesWithLiteralBlooms(nSeries, fromFp, throughFp, fromTs, throughTs)
+	mapped := make([]SeriesWithBlooms, 0, len(series))
+	for _, s := range series {
+		mapped = append(mapped, s.SeriesWithBlooms())
+	}
+
+	return mapped, keys
+}
+
+func MkBasicSeriesWithLiteralBlooms(nSeries int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithLiteralBlooms, keysList [][][]byte) {
 	const nGramLen = 4
-	seriesList = make([]SeriesWithBloom, 0, nSeries)
+	seriesList = make([]SeriesWithLiteralBlooms, 0, nSeries)
 	keysList = make([][][]byte, 0, nSeries)
 
 	step := (throughFp - fromFp) / model.Fingerprint(nSeries)
@@ -91,9 +115,9 @@ func MkBasicSeriesWithBlooms(nSeries, _ int, fromFp, throughFp model.Fingerprint
 		}
 	}
 
-	seriesList = append(seriesList, SeriesWithBloom{
+	seriesList = append(seriesList, SeriesWithLiteralBlooms{
 		Series: &series,
-		Bloom:  &bloom,
+		Blooms: []*Bloom{&bloom},
 	})
 	keysList = append(keysList, keys)
 }
@@ -110,3 +134,22 @@ func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual Ite
 	require.Nil(t, expected.Err())
 	require.Nil(t, actual.Err())
 }
+
+// CompareIterators is a testing utility for comparing iterators of different types.
+// It accepts a callback which can be used to assert characteristics of the corresponding elements
+// of the two iterators.
+// It also ensures that the lengths are the same and there are no errors from either iterator.
+func CompareIterators[A, B any](
+	t *testing.T,
+	f func(t *testing.T, a A, b B),
+	a Iterator[A],
+	b Iterator[B],
+) {
+	for a.Next() {
+		require.True(t, b.Next())
+		f(t, a.At(), b.At())
+	}
+	require.False(t, b.Next())
+	require.NoError(t, a.Err())
+	require.NoError(t, b.Err())
+}
diff --git a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go
index 22fb47e43e799..85aa7baa7b81b 100644
--- a/pkg/storage/bloom/v1/util.go
+++ b/pkg/storage/bloom/v1/util.go
@@ -2,6 +2,7 @@ package v1
 
 import (
 	"context"
+	"fmt"
 	"hash"
 	"hash/crc32"
 	"io"
@@ -10,14 +11,23 @@ import (
 	"github.com/prometheus/prometheus/util/pool"
 )
 
+type Version byte
+
+func (v Version) String() string {
+	return fmt.Sprintf("v%d", v)
+}
+
 const (
 	magicNumber = uint32(0xCA7CAFE5)
 	// Add new versions below
-	V1 byte = iota
+	V1 Version = iota
+	// V2 supports single series blooms encoded over multiple pages
+	// to accommodate larger single series
+	V2
 )
 
 const (
-	DefaultSchemaVersion = V1
+	DefaultSchemaVersion = V2
 )
 
 var (
@@ -88,6 +98,11 @@ type Iterator[T any] interface {
 	At() T
 }
 
+type SizedIterator[T any] interface {
+	Iterator[T]
+	Remaining() int // remaining
+}
+
 type PeekingIterator[T any] interface {
 	Peek() (T, bool)
 	Iterator[T]
@@ -166,8 +181,8 @@ func NewSliceIter[T any](xs []T) *SliceIter[T] {
 	return &SliceIter[T]{xs: xs, cur: -1}
 }
 
-func (it *SliceIter[T]) Len() int {
-	return len(it.xs) - (max(0, it.cur))
+func (it *SliceIter[T]) Remaining() int {
+	return max(0, len(it.xs)-(it.cur+1))
 }
 
 func (it *SliceIter[T]) Next() bool {
@@ -212,6 +227,14 @@ func (it *EmptyIter[T]) At() T {
 	return it.zero
 }
 
+func (it *EmptyIter[T]) Peek() (T, bool) {
+	return it.zero, false
+}
+
+func (it *EmptyIter[T]) Remaining() int {
+	return 0
+}
+
 // noop
 func (it *EmptyIter[T]) Reset() {}
 
diff --git a/pkg/storage/bloom/v1/util_test.go b/pkg/storage/bloom/v1/util_test.go
index afafa4d05a870..8af93231313be 100644
--- a/pkg/storage/bloom/v1/util_test.go
+++ b/pkg/storage/bloom/v1/util_test.go
@@ -52,3 +52,16 @@ func TestCounterIter(t *testing.T) {
 	// Assert that the count is correct and peeking hasn't jeopardized the count
 	require.Equal(t, len(data), itr.Count())
 }
+
+func TestSliceIterRemaining(t *testing.T) {
+	ln := 5
+	itr := NewSliceIter(make([]int, ln))
+
+	for i := 0; i < ln; i++ {
+		require.Equal(t, ln-i, itr.Remaining())
+		require.True(t, itr.Next())
+		require.Equal(t, ln-i-1, itr.Remaining())
+	}
+
+	require.False(t, itr.Next())
+}
diff --git a/pkg/storage/bloom/v1/versioned_builder.go b/pkg/storage/bloom/v1/versioned_builder.go
new file mode 100644
index 0000000000000..8b262ee62e557
--- /dev/null
+++ b/pkg/storage/bloom/v1/versioned_builder.go
@@ -0,0 +1,218 @@
+package v1
+
+import "github.com/pkg/errors"
+
+/*
+Each binary format (version) has its own builder. This provides a type-safe way to build the binary format
+while allowing reuse of underlying logic. As an example, the V2Builder will prevent encoding v1 series (only 1 bloom per series)
+as it only provides methods that are v2 compatible. The opposite is also true.
+
+Builders provide the following methods:
+- [Convenience method] BuildFrom: builds the binary format from an iterator of the relevant type.
+  Primarily used in testing since the MergeBuilder will be used in production and uses the lower level APIs below.
+
+- AddBloom: adds a bloom filter to the binary format and returns the offset at which it was added.
+- AddSeries: adds a series to the binary format and returns a boolean indicating if the series was added or not.
+- Close: closes the builder and returns the combined checksum of the index and bloom files.
+*/
+
+// Convenience constructor targeting the most current version.
+func NewBlockBuilder(opts BlockOptions, writer BlockWriter) (*V2Builder, error) {
+	return NewBlockBuilderV2(opts, writer)
+}
+
+// Convenience alias for the most current version.
+type BlockBuilder = V2Builder
+
+type V2Builder struct {
+	opts BlockOptions
+
+	writer BlockWriter
+	index  *IndexBuilder
+	blooms *BloomBlockBuilder
+}
+
+type SeriesWithBlooms struct {
+	Series *Series
+	Blooms SizedIterator[*Bloom]
+}
+
+func NewBlockBuilderV2(opts BlockOptions, writer BlockWriter) (*V2Builder, error) {
+	if opts.Schema.version != V2 {
+		return nil, errors.Errorf("schema mismatch creating v2 builder, expected %v, got %v", V2, opts.Schema.version)
+	}
+
+	index, err := writer.Index()
+	if err != nil {
+		return nil, errors.Wrap(err, "initializing index writer")
+	}
+	blooms, err := writer.Blooms()
+	if err != nil {
+		return nil, errors.Wrap(err, "initializing blooms writer")
+	}
+
+	return &V2Builder{
+		opts:   opts,
+		writer: writer,
+		index:  NewIndexBuilder(opts, index),
+		blooms: NewBloomBlockBuilder(opts, blooms),
+	}, nil
+}
+
+func (b *V2Builder) BuildFrom(itr Iterator[SeriesWithBlooms]) (uint32, error) {
+	for itr.Next() {
+		at := itr.At()
+		var offsets []BloomOffset
+		for at.Blooms.Next() {
+			offset, err := b.AddBloom(at.Blooms.At())
+			if err != nil {
+				return 0, errors.Wrap(err, "writing bloom")
+			}
+			offsets = append(offsets, offset)
+		}
+
+		if err := at.Blooms.Err(); err != nil {
+			return 0, errors.Wrap(err, "iterating blooms")
+		}
+		blockFull, err := b.AddSeries(*at.Series, offsets)
+		if err != nil {
+			return 0, errors.Wrapf(err, "writing series")
+		}
+		if blockFull {
+			break
+		}
+	}
+
+	if err := itr.Err(); err != nil {
+		return 0, errors.Wrap(err, "iterating series with blooms")
+	}
+
+	return b.Close()
+}
+
+func (b *V2Builder) Close() (uint32, error) {
+	bloomChecksum, err := b.blooms.Close()
+	if err != nil {
+		return 0, errors.Wrap(err, "closing bloom file")
+	}
+	indexCheckSum, err := b.index.Close()
+	if err != nil {
+		return 0, errors.Wrap(err, "closing series file")
+	}
+	return combineChecksums(indexCheckSum, bloomChecksum), nil
+}
+
+func (b *V2Builder) AddBloom(bloom *Bloom) (BloomOffset, error) {
+	return b.blooms.Append(bloom)
+}
+
+// AddSeries adds a series to the block. It returns true if, after adding the series, the block is full.
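Editor's note on the v2 builder contract documented above (append blooms first, then the series record referencing their offsets): the toy sketch below illustrates that calling order. It is illustrative only; `toyBuilder`, its paging rule, and the `offset` struct are hypothetical simplifications of the real PageWriter/BloomOffset machinery, which tracks byte offsets within compressed pages.

```go
package main

import "fmt"

// offset points at a bloom within a bloom "file": (page, position within page).
type offset struct{ page, byteOff int }

// toyBuilder mimics the v2 builder contract: append blooms first, collect the
// returned offsets, then append the series record that references them.
type toyBuilder struct {
	blooms []string // stand-in for encoded blooms
	series []struct {
		fp      uint64
		offsets []offset
	}
	pageSize int
}

// addBloom appends a bloom and returns where it landed, cutting a new page
// every pageSize entries (real pages are cut by byte size, not count).
func (b *toyBuilder) addBloom(bloom string) offset {
	i := len(b.blooms)
	b.blooms = append(b.blooms, bloom)
	return offset{page: i / b.pageSize, byteOff: i % b.pageSize}
}

func (b *toyBuilder) addSeries(fp uint64, offsets []offset) {
	b.series = append(b.series, struct {
		fp      uint64
		offsets []offset
	}{fp, offsets})
}

func main() {
	b := &toyBuilder{pageSize: 2}
	// One series, three blooms: the blooms are written first so the series
	// record can reference their offsets, possibly spanning multiple pages.
	var offs []offset
	for _, bl := range []string{"bloom-a", "bloom-b", "bloom-c"} {
		offs = append(offs, b.addBloom(bl))
	}
	b.addSeries(42, offs)
	fmt.Printf("series fp=42 -> offsets %v\n", offs)
	// Output: series fp=42 -> offsets [{0 0} {0 1} {1 0}]
}
```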
+func (b *V2Builder) AddSeries(series Series, offsets []BloomOffset) (bool, error) { + if err := b.index.AppendV2(SeriesWithOffsets{ + Offsets: offsets, + Series: series, + }); err != nil { + return false, errors.Wrapf(err, "writing index for series %v", series.Fingerprint) + } + + full, _, err := b.writer.Full(b.opts.BlockSize) + if err != nil { + return false, errors.Wrap(err, "checking if block is full") + } + + return full, nil +} + +// Now the same for legacy V1 +type SeriesWithBloom struct { + Series *Series + Bloom *Bloom +} + +//nolint:revive +type V1Builder struct { + opts BlockOptions + + writer BlockWriter + index *IndexBuilder + blooms *BloomBlockBuilder +} + +func NewBlockBuilderV1(opts BlockOptions, writer BlockWriter) (*V1Builder, error) { + if opts.Schema.version != V1 { + return nil, errors.Errorf("schema mismatch creating v1 builder, expected %v, got %v", V1, opts.Schema.version) + } + + index, err := writer.Index() + if err != nil { + return nil, errors.Wrap(err, "initializing index writer") + } + blooms, err := writer.Blooms() + if err != nil { + return nil, errors.Wrap(err, "initializing blooms writer") + } + + return &V1Builder{ + opts: opts, + writer: writer, + index: NewIndexBuilder(opts, index), + blooms: NewBloomBlockBuilder(opts, blooms), + }, nil +} + +func (b *V1Builder) BuildFrom(itr Iterator[SeriesWithBloom]) (uint32, error) { + for itr.Next() { + at := itr.At() + offset, err := b.AddBloom(at.Bloom) + if err != nil { + return 0, errors.Wrap(err, "writing bloom") + } + + blockFull, err := b.AddSeries(*at.Series, offset) + + if err != nil { + return 0, errors.Wrapf(err, "writing series") + } + if blockFull { + break + } + } + + if err := itr.Err(); err != nil { + return 0, errors.Wrap(err, "iterating series") + } + + return b.Close() +} + +func (b *V1Builder) Close() (uint32, error) { + bloomChecksum, err := b.blooms.Close() + if err != nil { + return 0, errors.Wrap(err, "closing bloom file") + } + indexCheckSum, err := b.index.Close() + if err != nil { + return 0, errors.Wrap(err, "closing series file") + } + return combineChecksums(indexCheckSum, bloomChecksum), nil +} + +func (b *V1Builder) AddBloom(bloom *Bloom) (BloomOffset, error) { + return b.blooms.Append(bloom) +} + +func (b *V1Builder) AddSeries(series Series, offset BloomOffset) (bool, error) { + if err := b.index.AppendV1(SeriesWithOffset{ + Series: series, + Offset: offset, + }); err != nil { + return false, errors.Wrapf(err, "writing index for series %v", series.Fingerprint) + } + + full, _, err := b.writer.Full(b.opts.BlockSize) + if err != nil { + return false, errors.Wrap(err, "checking if block is full") + } + + return full, nil +} diff --git a/pkg/storage/bloom/v1/versioned_builder_test.go b/pkg/storage/bloom/v1/versioned_builder_test.go new file mode 100644 index 0000000000000..a88ed9396982e --- /dev/null +++ b/pkg/storage/bloom/v1/versioned_builder_test.go @@ -0,0 +1,146 @@ +package v1 + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/chunkenc" + "github.com/grafana/loki/v3/pkg/util/encoding" +) + +// smallBlockOpts returns a set of block options that are suitable for testing +// characterized by small page sizes +func smallBlockOpts(v Version, enc chunkenc.Encoding) BlockOptions { + return BlockOptions{ + Schema: Schema{ + version: v, + encoding: enc, + nGramLength: 4, + nGramSkip: 0, + }, + SeriesPageSize: 100, + BloomPageSize: 2 << 10, + BlockSize: 0, // unlimited + } +} + +func setup(v Version) (BlockOptions, 
[]SeriesWithLiteralBlooms, BlockWriter, BlockReader) { + numSeries := 100 + data, _ := MkBasicSeriesWithLiteralBlooms(numSeries, 0, 0xffff, 0, 10000) + indexBuf := bytes.NewBuffer(nil) + bloomsBuf := bytes.NewBuffer(nil) + writer := NewMemoryBlockWriter(indexBuf, bloomsBuf) + reader := NewByteReader(indexBuf, bloomsBuf) + return smallBlockOpts(v, chunkenc.EncNone), data, writer, reader +} + +// Tests v1 format by encoding a block into v1 then decoding it back and comparing the results +// to the source data. +// NB(owen-d): This also tests that the block querier can "up cast" the v1 format to the v2 format +// in the sense that v1 uses a single bloom per series and v2 uses multiple blooms per series and therefore +// v1 can be interpreted as v2 with a single bloom per series. +func TestV1RoundTrip(t *testing.T) { + opts, data, writer, reader := setup(V1) + b, err := NewBlockBuilderV1(opts, writer) + require.NoError(t, err) + + mapped := NewMapIter[SeriesWithLiteralBlooms]( + NewSliceIter(data), + func(s SeriesWithLiteralBlooms) SeriesWithBloom { + return SeriesWithBloom{ + Series: s.Series, + Bloom: s.Blooms[0], + } + }, + ) + + _, err = b.BuildFrom(mapped) + require.NoError(t, err) + + // Ensure Equality + block := NewBlock(reader, NewMetrics(nil)) + querier := NewBlockQuerier(block, false, DefaultMaxPageSize).Iter() + + CompareIterators[SeriesWithLiteralBlooms, *SeriesWithBlooms]( + t, + func(t *testing.T, a SeriesWithLiteralBlooms, b *SeriesWithBlooms) { + require.Equal(t, a.Series, b.Series) // ensure series equality + bs, err := Collect(b.Blooms) + require.NoError(t, err) + + // ensure we only have one bloom in v1 + require.Equal(t, 1, len(a.Blooms)) + require.Equal(t, 1, len(bs)) + + var encA, encB encoding.Encbuf + require.NoError(t, a.Blooms[0].Encode(&encA)) + require.NoError(t, bs[0].Encode(&encB)) + + require.Equal(t, encA.Get(), encB.Get()) + }, + NewSliceIter(data), + querier, + ) +} + +func TestV2Roundtrip(t *testing.T) { + opts, data, writer, reader := setup(V2) + + data, err := Collect( + NewMapIter[SeriesWithLiteralBlooms, SeriesWithLiteralBlooms]( + NewSliceIter(data), + func(swlb SeriesWithLiteralBlooms) SeriesWithLiteralBlooms { + return SeriesWithLiteralBlooms{ + Series: swlb.Series, + // hack(owen-d): data currently only creates one bloom per series, but I want to test multiple. + // we're not checking the contents here, so ensuring the same bloom is used twice is fine. 
Blooms: []*Bloom{swlb.Blooms[0], swlb.Blooms[0]},
+				}
+			},
+		),
+	)
+	require.NoError(t, err)
+
+	b, err := NewBlockBuilderV2(opts, writer)
+	require.NoError(t, err)
+
+	mapped := NewMapIter[SeriesWithLiteralBlooms](
+		NewSliceIter(data),
+		func(s SeriesWithLiteralBlooms) SeriesWithBlooms {
+			return s.SeriesWithBlooms()
+		},
+	)
+
+	_, err = b.BuildFrom(mapped)
+	require.NoError(t, err)
+
+	// Ensure Equality
+	block := NewBlock(reader, NewMetrics(nil))
+	querier := NewBlockQuerier(block, false, DefaultMaxPageSize).Iter()
+
+	CompareIterators[SeriesWithLiteralBlooms, *SeriesWithBlooms](
+		t,
+		func(t *testing.T, a SeriesWithLiteralBlooms, b *SeriesWithBlooms) {
+			require.Equal(t, a.Series, b.Series) // ensure series equality
+			bs, err := Collect(b.Blooms)
+			require.NoError(t, err)
+
+			// ensure we have the expected two blooms per series in v2
+			require.Equal(t, 2, len(a.Blooms))
+			require.Equal(t, 2, len(bs))
+
+			var encA, encB encoding.Encbuf
+			for i := range a.Blooms {
+				require.NoError(t, a.Blooms[i].Encode(&encA))
+				require.NoError(t, bs[i].Encode(&encB))
+				require.Equal(t, encA.Get(), encB.Get())
+				encA.Reset()
+				encB.Reset()
+			}
+		},
+		NewSliceIter(data),
+		querier,
+	)
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go
index 6ff6ef64948e3..3c324b7b8b0e6 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache.go
@@ -28,11 +28,11 @@ func (c *CloseableBlockQuerier) Close() error {
 	return nil
 }
 
-func (c *CloseableBlockQuerier) SeriesIter() (v1.PeekingIterator[*v1.SeriesWithBloom], error) {
+func (c *CloseableBlockQuerier) SeriesIter() (v1.PeekingIterator[*v1.SeriesWithBlooms], error) {
 	if err := c.Reset(); err != nil {
 		return nil, err
 	}
-	return v1.NewPeekingIter[*v1.SeriesWithBloom](c.BlockQuerier), nil
+	return v1.NewPeekingIter[*v1.SeriesWithBlooms](c.BlockQuerier.Iter()), nil
 }
 
 func LoadBlocksDirIntoCache(paths []string, c Cache, logger log.Logger) error {
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index c3b459fee7f05..ce70ce172c02f 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -106,9 +106,9 @@ type Meta struct {
 	Blocks []BlockRef
 }
 
-func (m Meta) MostRecentSource() (tsdb.SingleTenantTSDBIdentifier, error) {
+func (m Meta) MostRecentSource() (tsdb.SingleTenantTSDBIdentifier, bool) {
 	if len(m.Sources) == 0 {
-		return tsdb.SingleTenantTSDBIdentifier{}, errors.New("no sources")
+		return tsdb.SingleTenantTSDBIdentifier{}, false
 	}
 
 	mostRecent := m.Sources[0]
@@ -118,7 +118,7 @@ func (m Meta) MostRecentSource() (tsdb.SingleTenantTSDBIdentifier, error) {
 		}
 	}
 
-	return mostRecent, nil
+	return mostRecent, true
 }
 
 func MetaRefFrom(
diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go
index 9b18427bacf10..8b2ff3365d766 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store.go
@@ -113,6 +113,28 @@ func (b *bloomStoreEntry) ResolveMetas(ctx context.Context, params MetaSearchPar
 	return [][]MetaRef{refs}, []*Fetcher{b.fetcher}, nil
 }
 
+// FilterMetasOverlappingBounds returns the metas that overlap the given bounds.
+// The input metas are expected to be sorted by fingerprint.
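Editor's note: the early-exit scan documented in this comment relies on the metas being sorted by fingerprint, so iteration can stop as soon as a meta starts past the query's upper bound. A minimal standalone sketch of that idea follows; the `bounds` type here is a hypothetical simplification, not the package's FingerprintBounds, and the real function below additionally works on Meta values and Loki's bounds comparison helpers.

```go
package main

import "fmt"

type bounds struct{ min, max uint64 }

func (b bounds) overlaps(o bounds) bool { return b.min <= o.max && o.min <= b.max }

// filterOverlapping keeps the ranges overlapping query from a slice sorted by
// min, stopping early once a range starts beyond the query's upper bound --
// the same shape as FilterMetasOverlappingBounds below.
func filterOverlapping(sorted []bounds, query bounds) []bounds {
	var out []bounds
	for _, r := range sorted {
		if r.min > query.max { // everything after this also starts too late
			break
		}
		if query.overlaps(r) {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	metas := []bounds{{0, 10}, {5, 20}, {25, 30}, {40, 50}}
	fmt.Println(filterOverlapping(metas, bounds{8, 26}))
	// Output: [{0 10} {5 20} {25 30}]
}
```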
+func FilterMetasOverlappingBounds(metas []Meta, bounds v1.FingerprintBounds) []Meta {
+	withinBounds := make([]Meta, 0, len(metas))
+	for _, meta := range metas {
+		// We can stop iterating once we find an item greater
+		// than the keyspace we're looking for
+		if bounds.Cmp(meta.Bounds.Min) == v1.After {
+			break
+		}
+
+		// Only check keyspace for now, because we don't have start/end timestamps in the refs
+		if !bounds.Overlaps(meta.Bounds) {
+			continue
+		}
+
+		withinBounds = append(withinBounds, meta)
+	}
+
+	return withinBounds
+}
+
 // FetchMetas implements store.
 func (b *bloomStoreEntry) FetchMetas(ctx context.Context, params MetaSearchParams) ([]Meta, error) {
 	logger := spanlogger.FromContext(ctx)
diff --git a/pkg/util/server/error.go b/pkg/util/server/error.go
index c120a79176f85..7326f7cecb6cf 100644
--- a/pkg/util/server/error.go
+++ b/pkg/util/server/error.go
@@ -27,6 +27,18 @@ const (
 	ErrDeadlineExceeded = "Request timed out, decrease the duration of the request or add more label matchers (prefer exact match over regex match) to reduce the amount of data processed."
 )
 
+// ClientGrpcStatusAndError recasts err as an httpgrpc error carrying the
+// client-facing HTTP status derived from ClientHTTPStatusAndError.
+// It returns nil when err is nil.
+func ClientGrpcStatusAndError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	status, newErr := ClientHTTPStatusAndError(err)
+	return httpgrpc.Errorf(status, "%s", newErr.Error())
+}
+
 // WriteError write a go error with the correct status code.
 func WriteError(err error, w http.ResponseWriter) {
 	status, cerr := ClientHTTPStatusAndError(err)
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 1bb316cf32021..6636170bc0c46 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
 
 [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
 
+## 6.6.3
+
+- [BUGFIX] Fix indentation of `query_range` Helm chart values
+
 ## 6.6.2
 
 - [BUGFIX] Fix query-frontend (headless) and ruler http-metrics targetPort
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 47606001b954e..bd2f78049a816 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
 description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
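The sorted-input precondition on `FilterMetasOverlappingBounds` above is what allows the early `break` once iteration moves past the requested keyspace. A short usage sketch, assuming only the API visible in this diff (`ownedMetas` is a hypothetical wrapper, not code from this change):

```go
package example // illustrative only, not part of this diff

import (
	"sort"

	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
	"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
)

// ownedMetas narrows fetched metas down to the keyspace a component owns.
// Sorting by the minimum fingerprint satisfies the helper's precondition,
// so its scan can stop as soon as it passes the requested bounds.
func ownedMetas(metas []bloomshipper.Meta, owned v1.FingerprintBounds) []bloomshipper.Meta {
	sort.Slice(metas, func(i, j int) bool {
		return metas[i].Bounds.Min < metas[j].Bounds.Min
	})
	return bloomshipper.FilterMetasOverlappingBounds(metas, owned)
}
```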
type: application appVersion: 3.0.0 -version: 6.6.2 +version: 6.6.3 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 03ddb05bf6608..a207de47c39a6 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.6.1](https://img.shields.io/badge/Version-6.6.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) +![Version: 6.6.3](https://img.shields.io/badge/Version-6.6.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 4c70bf16fe474..6485a59e71c0a 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -212,7 +212,7 @@ loki: query_range: align_queries_with_step: true {{- with .Values.loki.query_range }} - {{- tpl (. | toYaml) $ | nindent 4 }} + {{- tpl (. | toYaml) $ | nindent 2 }} {{- end }} {{- if .Values.resultsCache.enabled }} {{- with .Values.resultsCache }} diff --git a/tools/tsdb/bloom-tester/Dockerfile b/tools/tsdb/bloom-tester/Dockerfile deleted file mode 100644 index 3db7db012e43a..0000000000000 --- a/tools/tsdb/bloom-tester/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM golang:1.22.2-bookworm as build -ENV BUILD_IN_CONTAINER=false - -COPY . 
/src/bloom-tester -WORKDIR /src/bloom-tester - -RUN make bloom-tester - -FROM alpine:3.18.5 -RUN apk add --update --no-cache ca-certificates -COPY --from=build /src/bloom-tester/tools/tsdb/bloom-tester/bloom-tester /usr/bin/bloom-tester -ENTRYPOINT [ "/usr/bin/bloom-tester", "--config.file=/etc/loki/config.yaml" ] -#CMD tail -f /dev/null diff --git a/tools/tsdb/bloom-tester/concurrent.go b/tools/tsdb/bloom-tester/concurrent.go deleted file mode 100644 index a6a2382a2a4ad..0000000000000 --- a/tools/tsdb/bloom-tester/concurrent.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" -) - -type pool struct { - n int // number of workers - ch chan struct{} -} - -func newPool(n int) *pool { - p := &pool{ - n: n, - ch: make(chan struct{}, n), - } - - // seed channel - for i := 0; i < n; i++ { - p.ch <- struct{}{} - } - - return p -} - -func (p *pool) acquire( - ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta, - fn func(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta), -) { - <-p.ch - go func() { - fn(ls, fp, chks) - p.ch <- struct{}{} - }() -} - -func (p *pool) drain() { - for i := 0; i < p.n; i++ { - <-p.ch - } -} diff --git a/tools/tsdb/bloom-tester/lib.go b/tools/tsdb/bloom-tester/lib.go deleted file mode 100644 index 0d3a505668047..0000000000000 --- a/tools/tsdb/bloom-tester/lib.go +++ /dev/null @@ -1,530 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "context" - "flag" - "fmt" - - "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" - tsdbindex "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" - - "hash/fnv" - "math" - "os" - "strconv" - "strings" - "time" - - "github.com/go-kit/log/level" - "github.com/grafana/dskit/services" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/grafana/loki/v3/pkg/chunkenc" - "github.com/grafana/loki/v3/pkg/logproto" - "github.com/grafana/loki/v3/pkg/storage" - bt "github.com/grafana/loki/v3/pkg/storage/bloom/v1" - "github.com/grafana/loki/v3/pkg/storage/chunk" - "github.com/grafana/loki/v3/pkg/storage/chunk/client" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper" - shipperindex "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/index" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" - util_log "github.com/grafana/loki/v3/pkg/util/log" - "github.com/grafana/loki/v3/tools/tsdb/helpers" -) - -const ( - DefaultNGramLength = 4 - DefaultNGramSkip = 0 -) - -func execute() { - conf, svc, bucket, err := helpers.Setup() - helpers.ExitErr("setting up", err) - - _, overrides, clientMetrics := helpers.DefaultConfigs() - - flag.Parse() - - periodCfg, tableRange, tableName, err := helpers.GetPeriodConfigForTableNumber(bucket, conf.SchemaConfig.Configs) - helpers.ExitErr("find period config for bucket", err) - - objectClient, err := storage.NewObjectClient(periodCfg.ObjectType, conf.StorageConfig, clientMetrics) - helpers.ExitErr("creating object client", err) - - chunkClient := client.NewClient(objectClient, nil, conf.SchemaConfig) - - openFn := func(p string) (shipperindex.Index, error) { - return tsdb.OpenShippableTSDB(p) - } - - indexShipper, err := indexshipper.NewIndexShipper( - periodCfg.IndexTables.PathPrefix, - conf.StorageConfig.TSDBShipperConfig, - 
objectClient, - overrides, - nil, - openFn, - tableRange, - prometheus.WrapRegistererWithPrefix("loki_tsdb_shipper_", prometheus.DefaultRegisterer), - util_log.Logger, - ) - helpers.ExitErr("creating index shipper", err) - - tenants, err := helpers.ResolveTenants(objectClient, periodCfg.IndexTables.PathPrefix, tableName) - level.Info(util_log.Logger).Log("tenants", strings.Join(tenants, ","), "table", tableName) - helpers.ExitErr("resolving tenants", err) - - //sampler, err := NewProbabilisticSampler(0.00008) - sampler, err := NewProbabilisticSampler(1.000) - helpers.ExitErr("creating sampler", err) - - metrics := NewMetrics(prometheus.DefaultRegisterer) - - level.Info(util_log.Logger).Log("msg", "starting server") - err = services.StartAndAwaitRunning(context.Background(), svc) - helpers.ExitErr("waiting for service to start", err) - level.Info(util_log.Logger).Log("msg", "server started") - - err = analyze(metrics, sampler, indexShipper, chunkClient, tableName, tenants, objectClient) - helpers.ExitErr("analyzing", err) -} - -var ( - three = bt.NewNGramTokenizer(3, 0) - four = bt.NewNGramTokenizer(4, 0) - - onePctError = func() *filter.ScalableBloomFilter { return filter.NewScalableBloomFilter(1024, 0.01, 0.8) } -) - -var experiments = []Experiment{ - // n > error > skip > index - - /* - NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ), - */ - NewExperiment( - "token=4skip0_error=1%_indexchunks=true", - four, - true, - onePctError, - ), - /* - NewExperiment( - "token=4skip1_error=1%_indexchunks=true", - fourSkip1, - true, - onePctError, - ), - - NewExperiment( - "token=4skip2_error=1%_indexchunks=true", - fourSkip2, - true, - onePctError, - ), - NewExperiment( - "token=4skip0_error=5%_indexchunks=true", - four, - true, - fivePctError, - ),*/ - /* - NewExperiment( - "token=4skip1_error=5%_indexchunks=true", - fourSkip1, - true, - fivePctError, - ), - NewExperiment( - "token=4skip2_error=5%_indexchunks=true", - fourSkip2, - true, - fivePctError, - ), - /* - NewExperiment( - "token=5skip0_error=1%_indexchunks=true", - five, - true, - onePctError, - ), - NewExperiment( - "token=6skip0_error=1%_indexchunks=true", - six, - true, - onePctError, - ), - */ - /* - NewExperiment( - "token=3skip0_error=1%_indexchunks=false", - three, - false, - onePctError, - ), - */ - /* - NewExperiment( - "token=3skip1_error=1%_indexchunks=true", - threeSkip1, - true, - onePctError, - ),*/ - /* - NewExperiment( - "token=3skip1_error=1%_indexchunks=false", - threeSkip1, - false, - onePctError, - ), - */ - /* - NewExperiment( - "token=3skip2_error=1%_indexchunks=true", - threeSkip2, - true, - onePctError, - ),*/ - /* - NewExperiment( - "token=3skip2_error=1%_indexchunks=false", - threeSkip2, - false, - onePctError, - ), - */ - /* - NewExperiment( - "token=3skip0_error=5%_indexchunks=true", - three, - true, - fivePctError, - ),*/ - /* - NewExperiment( - "token=3skip0_error=5%_indexchunks=false", - three, - false, - fivePctError, - ), - */ - /* - NewExperiment( - "token=3skip1_error=5%_indexchunks=true", - threeSkip1, - true, - fivePctError, - ),*/ - /* - NewExperiment( - "token=3skip1_error=5%_indexchunks=false", - threeSkip1, - false, - fivePctError, - ), - */ - /* - NewExperiment( - "token=3skip2_error=5%_indexchunks=true", - threeSkip2, - true, - fivePctError, - ),*/ - /* - NewExperiment( - "token=3skip2_error=5%_indexchunks=false", - threeSkip2, - false, - fivePctError, - ), - - */ -} - -func analyze(metrics *Metrics, sampler Sampler, indexShipper 
indexshipper.IndexShipper, client client.Client, tableName string, tenants []string, objectClient client.ObjectClient) error { - metrics.tenants.Add(float64(len(tenants))) - - testerNumber := extractTesterNumber(os.Getenv("HOSTNAME")) - if testerNumber == -1 { - helpers.ExitErr("extracting hostname index number", nil) - } - numTesters, _ := strconv.Atoi(os.Getenv("NUM_TESTERS")) - if numTesters == -1 { - helpers.ExitErr("extracting total number of testers", nil) - } - level.Info(util_log.Logger).Log("msg", "starting analyze()", "tester", testerNumber, "total", numTesters) - - var n int // count iterated series - // pool := newPool(runtime.NumCPU()) - // pool := newPool(1) - bloomTokenizer, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) - for _, tenant := range tenants { - level.Info(util_log.Logger).Log("Analyzing tenant", tenant, "table", tableName) - err := indexShipper.ForEach( - context.Background(), - tableName, - tenant, - func(isMultiTenantIndex bool, idx shipperindex.Index) error { - if isMultiTenantIndex { - return nil - } - - casted := idx.(*tsdb.TSDBFile).Index.(*tsdb.TSDBIndex) - _ = casted.ForSeries( - context.Background(), - "", nil, model.Earliest, model.Latest, - func(ls labels.Labels, fp model.Fingerprint, chks []tsdbindex.ChunkMeta) (stop bool) { - seriesString := ls.String() - seriesStringHash := FNV32a(seriesString) - pos, _ := strconv.Atoi(seriesStringHash) - - workernumber := AssignToWorker(pos, numTesters) - - if (workernumber == testerNumber) && (len(chks) < 10000) { // for each series - - /*(pool.acquire( - ls.Copy(), - fp, - chksCpy, - func(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) {*/ - - metrics.series.Inc() - metrics.chunks.Add(float64(len(chks))) - - if !sampler.Sample() { - return - } - - transformed := make([]chunk.Chunk, 0, len(chks)) - for _, chk := range chks { - transformed = append(transformed, chunk.Chunk{ - ChunkRef: logproto.ChunkRef{ - Fingerprint: uint64(fp), - UserID: tenant, - From: chk.From(), - Through: chk.Through(), - Checksum: chk.Checksum, - }, - }) - } - - got, err := client.GetChunks( - context.Background(), - transformed, - ) - if err == nil { - // record raw chunk sizes - var chunkTotalUncompressedSize int - for _, c := range got { - chunkTotalUncompressedSize += c.Data.(*chunkenc.Facade).LokiChunk().UncompressedSize() - } - n += len(got) - - // iterate experiments - for _, experiment := range experiments { - bucketPrefix := os.Getenv("BUCKET_PREFIX") - if strings.EqualFold(bucketPrefix, "") { - bucketPrefix = "named-experiments-" - } - if !sbfFileExists("bloomtests", - fmt.Sprint(bucketPrefix, experiment.name), - os.Getenv("BUCKET"), - tenant, - ls.String(), - objectClient) { - bloomTokenizer.SetLineTokenizer(experiment.tokenizer) - - level.Info(util_log.Logger).Log("Starting work on: ", ls.String(), "'", FNV32a(ls.String()), "'", experiment.name, tenant) - startTime := time.Now().UnixMilli() - - sbf := experiment.bloom() - bloom := bt.Bloom{ - ScalableBloomFilter: *sbf, - } - series := bt.Series{ - Fingerprint: fp, - } - swb := bt.SeriesWithBloom{ - Bloom: &bloom, - Series: &series, - } - err := bloomTokenizer.PopulateSeriesWithBloom(&swb, got) - if err != nil { - level.Error(util_log.Logger).Log("msg", "failed populating SeriesWithBloom", "err", err) - } - endTime := time.Now().UnixMilli() - if len(got) > 0 { - metrics.bloomSize.WithLabelValues(experiment.name).Observe(float64(sbf.Capacity() / 8)) - fillRatio := sbf.FillRatio() - 
metrics.hammingWeightRatio.WithLabelValues(experiment.name).Observe(fillRatio) - metrics.estimatedCount.WithLabelValues(experiment.name).Observe( - float64(estimatedCount(sbf.Capacity(), sbf.FillRatio())), - ) - - writeSBF(&swb.Bloom.ScalableBloomFilter, - os.Getenv("DIR"), - fmt.Sprint(bucketPrefix, experiment.name), - os.Getenv("BUCKET"), - tenant, - ls.String(), - objectClient) - - metrics.sbfCreationTime.WithLabelValues(experiment.name).Add(float64(endTime - startTime)) - metrics.sbfsCreated.WithLabelValues(experiment.name).Inc() - - if err != nil { - helpers.ExitErr("writing sbf to file", err) - } - } // logging chunk stats block - } // if sbf doesn't exist - } // for each experiment - } else { - level.Info(util_log.Logger).Log("error getting chunks", err) - } - - metrics.seriesKept.Inc() - metrics.chunksKept.Add(float64(len(chks))) - metrics.chunksPerSeries.Observe(float64(len(chks))) - - /*}, - )*/ - } // for each series - - return false - }, - labels.MustNewMatcher(labels.MatchEqual, "", ""), - ) - - return nil - - }, - ) - helpers.ExitErr(fmt.Sprintf("iterating tenant %s", tenant), err) - - } - - level.Info(util_log.Logger).Log("msg", "waiting for workers to finish") - //pool.drain() // wait for workers to finish - level.Info(util_log.Logger).Log("msg", "waiting for final scrape") - //time.Sleep(30 * time.Second) // allow final scrape - time.Sleep(time.Duration(1<<63 - 1)) // wait forever - return nil -} - -// n ≈ −m ln(1 − p). -func estimatedCount(m uint, p float64) uint { - return uint(-float64(m) * math.Log(1-p)) -} - -func extractTesterNumber(input string) int { - // Split the input string by '-' to get individual parts - parts := strings.Split(input, "-") - - // Extract the last part (the number) - lastPart := parts[len(parts)-1] - - // Attempt to convert the last part to an integer - extractedNumber, err := strconv.Atoi(lastPart) - if err != nil { - return -1 - } - - // Send the extracted number to the result channel - return extractedNumber -} - -func AssignToWorker(index int, numWorkers int) int { - // Calculate the hash of the index - h := fnv.New32a() - h.Write([]byte(fmt.Sprintf("%d", index))) - hash := int(h.Sum32()) - - // Use modulo to determine which worker should handle the index - workerID := hash % numWorkers - - return workerID -} - -func FNV32a(text string) string { - hashAlgorithm := fnv.New32a() - hashAlgorithm.Reset() - hashAlgorithm.Write([]byte(text)) - return strconv.Itoa(int(hashAlgorithm.Sum32())) -} - -func sbfFileExists(location, prefix, period, tenant, series string, objectClient client.ObjectClient) bool { - dirPath := fmt.Sprintf("%s/%s/%s/%s", location, prefix, period, tenant) - fullPath := fmt.Sprintf("%s/%s", dirPath, FNV32a(series)) - - result, _ := objectClient.ObjectExists(context.Background(), fullPath) - //fmt.Println(fullPath, result) - return result -} - -func writeSBF(sbf *filter.ScalableBloomFilter, location, prefix, period, tenant, series string, objectClient client.ObjectClient) { - dirPath := fmt.Sprintf("%s/%s/%s/%s", location, prefix, period, tenant) - objectStoragePath := fmt.Sprintf("bloomtests/%s/%s/%s", prefix, period, tenant) - if err := os.MkdirAll(dirPath, os.ModePerm); err != nil { - helpers.ExitErr("error creating sbf dir", err) - } - - err := writeSBFToFile(sbf, fmt.Sprintf("%s/%s", dirPath, FNV32a(series))) - if err != nil { - helpers.ExitErr("writing sbf to file", err) - } - - writeSBFToObjectStorage(sbf, - fmt.Sprintf("%s/%s", objectStoragePath, FNV32a(series)), - fmt.Sprintf("%s/%s", dirPath, FNV32a(series)), - 
objectClient) -} - -func writeSBFToFile(sbf *filter.ScalableBloomFilter, filename string) error { - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - - w := bufio.NewWriter(f) - bytesWritten, err := sbf.WriteTo(w) - if err != nil { - return err - } - level.Info(util_log.Logger).Log("msg", "wrote sbf", "bytes", bytesWritten, "file", filename) - - err = w.Flush() - return err -} - -func writeSBFToObjectStorage(_ *filter.ScalableBloomFilter, objectStorageFilename, localFilename string, objectClient client.ObjectClient) { - // Probably a better way to do this than to reopen the file, but it's late - file, err := os.Open(localFilename) - if err != nil { - level.Info(util_log.Logger).Log("error opening", localFilename, "error", err) - } - - defer file.Close() - - fileInfo, _ := file.Stat() - var size = fileInfo.Size() - - buffer := make([]byte, size) - - // read file content to buffer - _, _ = file.Read(buffer) - - fileBytes := bytes.NewReader(buffer) // converted to io.ReadSeeker type - - _ = objectClient.PutObject(context.Background(), objectStorageFilename, fileBytes) - level.Info(util_log.Logger).Log("done writing", objectStorageFilename) -} diff --git a/tools/tsdb/bloom-tester/lib_test.go b/tools/tsdb/bloom-tester/lib_test.go deleted file mode 100644 index 8795c8eb883a2..0000000000000 --- a/tools/tsdb/bloom-tester/lib_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package main - -import ( - "bufio" - "os" - "testing" -) - -const BigFile = "../../../pkg/logql/sketch/testdata/war_peace.txt" - -func BenchmarkSBFTestAndAdd(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - - for tokens.Next() { - tok := tokens.At() - sbf.TestAndAdd(tok) - } - } - } -} - -func BenchmarkSBFAdd(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - - for tokens.Next() { - tok := tokens.At() - sbf.TestAndAdd(tok) - } - } - } -} - -func BenchmarkSBFSeparateTestAndAdd(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - - for tokens.Next() { - tok := tokens.At() - sbf.TestAndAdd(tok) - } - } - } -} - -func BenchmarkSBFTestAndAddWithLRU(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - cache := NewLRUCache4(150000) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - - for 
tokens.Next() { - tok := tokens.At() - if !cache.Get(tok) { - cache.Put(tok) - sbf.TestAndAdd(tok) - } - sbf.TestAndAdd(tok) - } - } - } -} - -func BenchmarkSBFSeparateTestAndAddWithLRU(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - cache := NewLRUCache4(150000) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for tokens.Next() { - tok := tokens.At() - if !cache.Get(tok) { - cache.Put(tok) - found := sbf.Test(tok) - if !found { - sbf.Add(tok) - } - } - sbf.TestAndAdd(tok) - } - } - } -} - -func BenchmarkSBFSeparateTestAndAddWithMap(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - file, _ := os.Open(BigFile) - defer file.Close() - scanner := bufio.NewScanner(file) - experiment := NewExperiment( - "token=3skip0_error=1%_indexchunks=true", - three, - true, - onePctError, - ) - sbf := experiment.bloom() - cache := make(map[string]interface{}, 150000) - b.StartTimer() - for scanner.Scan() { - line := scanner.Text() - tokens := experiment.tokenizer.Tokens(line) - for tokens.Next() { - tok := tokens.At() - tokStr := string(tok) - _, found := cache[tokStr] - if !found { - cache[tokStr] = "" - f := sbf.Test(tok) - if !f { - sbf.Add(tok) - } - - if len(cache) > 150000 { - for elem := range cache { - delete(cache, elem) - } - } - } - } - } - } -} diff --git a/tools/tsdb/bloom-tester/lrucache.go b/tools/tsdb/bloom-tester/lrucache.go deleted file mode 100644 index 56caba451f1fd..0000000000000 --- a/tools/tsdb/bloom-tester/lrucache.go +++ /dev/null @@ -1,606 +0,0 @@ -package main - -import ( - "container/list" - "fmt" -) - -type LRUCache struct { - capacity int - cache map[string]*list.Element - list *list.List -} - -type Entry struct { - key string -} - -func NewLRUCache(capacity int) *LRUCache { - return &LRUCache{ - capacity: capacity, - cache: make(map[string]*list.Element), - list: list.New(), - } -} - -func (c *LRUCache) Get(key string) bool { - if elem, ok := c.cache[key]; ok { - // Move the accessed element to the front of the list - c.list.MoveToFront(elem) - return true - } - return false -} - -func (c *LRUCache) Put(key string) { - if elem, ok := c.cache[key]; ok { - // If the key already exists, move it to the front - c.list.MoveToFront(elem) - } else { - // If the cache is full, remove the least recently used element - if len(c.cache) >= c.capacity { - // Get the least recently used element from the back of the list - tailElem := c.list.Back() - if tailElem != nil { - deletedEntry := c.list.Remove(tailElem).(*Entry) - delete(c.cache, deletedEntry.key) - } - } - - // Add the new key to the cache and the front of the list - newEntry := &Entry{key} - newElem := c.list.PushFront(newEntry) - c.cache[key] = newElem - } -} - -func (c *LRUCache) Clear() { - // Iterate through the list and remove all elements - for elem := c.list.Front(); elem != nil; elem = elem.Next() { - delete(c.cache, elem.Value.(*Entry).key) - } - - // Clear the list - c.list.Init() -} - -type LRUCache2 struct { - capacity int - cache map[string]*LRUNode2 - head *LRUNode2 - tail *LRUNode2 -} - -type LRUNode2 struct { - key string - //value interface{} - prev *LRUNode2 - next *LRUNode2 -} - -func NewLRUCache2(capacity int) *LRUCache2 { - return &LRUCache2{ - capacity: capacity, - cache: make(map[string]*LRUNode2), 
- } -} - -func (c *LRUCache2) Get(key string) bool { - if node, ok := c.cache[key]; ok { - // Move the accessed element to the front - c.moveToFront(node) - return true - } - return false -} - -func (c *LRUCache2) Put(key string) { - if node, ok := c.cache[key]; ok { - // If the key already exists, update the value and move it to the front - c.moveToFront(node) - } else { - // If the cache is full, remove the least recently used element - if len(c.cache) >= c.capacity { - c.removeTail() - } - - // Add the new key to the cache and the front - newNode := &LRUNode2{key: key} - c.cache[key] = newNode - c.addToFront(newNode) - } -} - -func (c *LRUCache2) moveToFront(node *LRUNode2) { - if node == c.head { - return - } - if node == c.tail { - c.tail = node.prev - c.tail.next = nil - } else { - node.prev.next = node.next - node.next.prev = node.prev - } - c.addToFront(node) -} - -func (c *LRUCache2) addToFront(node *LRUNode2) { - node.prev = nil - node.next = c.head - if c.head != nil { - c.head.prev = node - } - c.head = node - if c.tail == nil { - c.tail = node - } -} - -func (c *LRUCache2) removeTail() { - if c.tail == nil { - return - } - delete(c.cache, c.tail.key) - if c.tail == c.head { - c.head = nil - c.tail = nil - } else { - c.tail = c.tail.prev - c.tail.next = nil - } -} - -type LRUCache4 struct { - capacity int - cache map[string]*list.Element - list *list.List -} - -type Entry4 struct { - key string - value []byte -} - -func NewLRUCache4(capacity int) *LRUCache4 { - return &LRUCache4{ - capacity: capacity, - cache: make(map[string]*list.Element), - list: list.New(), - } -} - -func (c *LRUCache4) Get(value []byte) bool { - if elem, ok := c.cache[string(value)]; ok { - // Move the accessed element to the front of the list - c.list.MoveToFront(elem) - return true - } - return false -} - -func (c *LRUCache4) GetString(key string) bool { - if elem, ok := c.cache[key]; ok { - // Move the accessed element to the front of the list - c.list.MoveToFront(elem) - return true - } - return false -} - -func (c *LRUCache4) PutStringByte(key string, value []byte) { - - if elem, ok := c.cache[key]; ok { - // If the key already exists, move it to the front - c.list.MoveToFront(elem) - } else { - // If the cache is full, remove the least recently used element - if len(c.cache) >= c.capacity { - // Get the least recently used element from the back of the list - tailElem := c.list.Back() - if tailElem != nil { - deletedEntry := c.list.Remove(tailElem).(*Entry4) - delete(c.cache, deletedEntry.key) - } - } - - // Add the new key to the cache and the front of the list - newEntry := &Entry4{key, value} - newElem := c.list.PushFront(newEntry) - c.cache[key] = newElem - } -} - -func (c *LRUCache4) Put(value []byte) { - c.PutStringByte(string(value), value) -} - -func (c *LRUCache4) PutString(value string) { - c.PutStringByte(value, []byte(value)) -} - -func (c *LRUCache4) Clear() { - // Iterate through the list and remove all elements - for elem := c.list.Front(); elem != nil; elem = elem.Next() { - delete(c.cache, elem.Value.(*Entry4).key) - } - - // Clear the list - c.list.Init() -} - -type HashSet struct { - capacity int - cache map[string][]byte -} - -func NewHashSet(capacity int) *HashSet { - return &HashSet{ - capacity: capacity, - cache: make(map[string][]byte), - } -} - -func (c *HashSet) Get(key string) (bool, []byte) { - if value, ok := c.cache[key]; ok { - return true, value - } - return false, nil -} - -func (c *HashSet) Put(key string) { - c.cache[key] = []byte(key) -} - -func (c *HashSet) 
PutBytes(value []byte) { - key := string(value) - c.cache[key] = []byte(key) -} - -func (c *HashSet) PutBoth(key string, value []byte) { - c.cache[key] = value -} - -func (c *HashSet) SurfaceMap() map[string][]byte { - return c.cache -} - -func (c *HashSet) Clear() { - for k := range c.cache { - delete(c.cache, k) - } -} - -// ByteKey is an interface for types that represent keys of a certain size. -type ByteKey interface { - Size() int - Equal(other ByteKey) bool -} - -// FourByteKey represents a key of 4 bytes. -type FourByteKey [4]byte - -// Size returns the size of the FourByteKey. -func (k FourByteKey) Size() int { - return 4 -} - -// Equal checks if two FourByteKeys are equal. -func (k FourByteKey) Equal(other ByteKey) bool { - if otherFourByteKey, ok := other.(FourByteKey); ok { - return k == otherFourByteKey - } - return false -} - -// ThirtyOneByteKey represents a key of 31 bytes. -type ThirtyOneByteKey [31]byte - -// Size returns the size of the ThirtyOneByteKey. -func (k ThirtyOneByteKey) Size() int { - return 31 -} - -// Equal checks if two ThirtyOneByteKeys are equal. -func (k ThirtyOneByteKey) Equal(other ByteKey) bool { - if otherThirtyOneByteKey, ok := other.(ThirtyOneByteKey); ok { - return k == otherThirtyOneByteKey - } - return false -} - -type ByteKeyLRUCache struct { - capacity int - //m map[ByteKey]struct{} - m map[ByteKey]*list.Element - list *list.List -} - -func NewByteKeyLRUCache(capacity int) *ByteKeyLRUCache { - return &ByteKeyLRUCache{ - capacity: capacity, - m: make(map[ByteKey]*list.Element, capacity), - list: list.New(), - } -} - -func (c *ByteKeyLRUCache) Get(key ByteKey) bool { - if value, ok := c.m[key]; ok { - // Move the accessed element to the front of the list - c.list.MoveToFront(value) - return true - } - return false -} - -func (c *ByteKeyLRUCache) Put(key ByteKey) { - if value, ok := c.m[key]; ok { - // If the key already exists, move it to the front - c.list.MoveToFront(value) - } else { - // If the cache is full, remove the least recently used element - if len(c.m) >= c.capacity { - // Get the least recently used element from the back of the list - tailElem := c.list.Back() - if tailElem != nil { - deletedEntry := c.list.Remove(tailElem).(ByteKey) - delete(c.m, deletedEntry) - } - } - - // Add the new key to the cache and the front of the list - elem := c.list.PushFront(key) - c.m[key] = elem - } -} - -func (c *ByteKeyLRUCache) Clear() { - // Iterate through the list and remove all elements - for elem := c.list.Front(); elem != nil; elem = elem.Next() { - delete(c.m, elem.Value.(ByteKey)) - } - - // Clear the list - c.list.Init() -} - -// ByteKeyMap is a map that uses ByteKey as a key. -type ByteKeyMap struct { - capacity int - m map[ByteKey]struct{} -} - -// NewByteKeyMap creates a new ByteKeyMap. -func NewByteKeyMap(capacity int) ByteKeyMap { - return ByteKeyMap{ - capacity: capacity, - m: make(map[ByteKey]struct{}, capacity), - } -} - -// Put adds an entry to the map. -func (bm *ByteKeyMap) Put(key ByteKey) { - bm.m[key] = struct{}{} -} - -// Get retrieves a value from the map based on the key. 
-func (bm *ByteKeyMap) Get(key ByteKey) bool { - _, exists := bm.m[key] - return exists -} - -type ByteSet struct { - capacity int - cache map[[4]byte]struct{} -} - -func NewByteSet(capacity int) *ByteSet { - return &ByteSet{ - capacity: capacity, - cache: make(map[[4]byte]struct{}), - } -} - -func sliceToByteArray(slice []byte) [4]byte { - // Define the desired size of the byte array - // If you want to make it dynamically sized, use len(slice) - var array [4]byte - - // Copy elements from the slice to the array - copy(array[:], slice) - - return array -} - -// NewFourByteKeyFromSlice converts a byte slice to a FourByteKey. -func NewFourByteKeyFromSlice(slice []byte) FourByteKey { - var key FourByteKey - copy(key[:], slice) - return key -} - -// NewThirtyOneByteKeyFromSlice converts a byte slice to a FourByteKey. -func NewThirtyOneByteKeyFromSlice(slice []byte) ThirtyOneByteKey { - var key ThirtyOneByteKey - copy(key[:], slice) - return key -} - -func (c ByteSet) Get(key string) bool { - if _, ok := c.cache[sliceToByteArray([]byte(key))]; ok { - return true - } - return false -} - -func (c *ByteSet) Put(key string) { - c.cache[sliceToByteArray([]byte(key))] = struct{}{} -} - -func (c *ByteSet) PutBytes(value []byte) { - c.cache[sliceToByteArray(value)] = struct{}{} -} - -func (c *ByteSet) Clear() { - for k := range c.cache { - delete(c.cache, k) - } -} - -type FourByteKeyLRUCache struct { - capacity int - m map[[4]byte]*list.Element - list *list.List -} - -func NewFourByteKeyLRUCache(capacity int) *FourByteKeyLRUCache { - return &FourByteKeyLRUCache{ - capacity: capacity, - m: make(map[[4]byte]*list.Element, capacity), - list: list.New(), - } -} - -func (c *FourByteKeyLRUCache) Get(key [4]byte) bool { - if value, ok := c.m[key]; ok { - // Move the accessed element to the front of the list - c.list.MoveToFront(value) - return true - } - return false -} - -func (c *FourByteKeyLRUCache) Put(key [4]byte) { - if value, ok := c.m[key]; ok { - // If the key already exists, move it to the front - c.list.MoveToFront(value) - } else { - // If the cache is full, remove the least recently used element - if len(c.m) >= c.capacity { - // Get the least recently used element from the back of the list - tailElem := c.list.Back() - if tailElem != nil { - deletedEntry := c.list.Remove(tailElem).([4]byte) - delete(c.m, deletedEntry) - } - } - - // Add the new key to the cache and the front of the list - elem := c.list.PushFront(key) - c.m[key] = elem - } -} - -func (c *FourByteKeyLRUCache) Clear() { - // Iterate through the list and remove all elements - for elem := c.list.Front(); elem != nil; elem = elem.Next() { - delete(c.m, elem.Value.([4]byte)) - } - - // Clear the list - c.list.Init() -} - -type LRUCache5 struct { - capacity int - cache map[string]*LRUNode5 - head *LRUNode5 - tail *LRUNode5 -} - -type LRUNode5 struct { - key string - prev *LRUNode5 - next *LRUNode5 -} - -func NewLRUCache5(capacity int) *LRUCache5 { - return &LRUCache5{ - capacity: capacity, - } -} -func (c *LRUCache5) init() { - c.cache = make(map[string]*LRUNode5, c.capacity) - c.head = new(LRUNode5) - c.tail = new(LRUNode5) - c.head.next = c.tail - c.tail.prev = c.head -} - -func (c *LRUCache5) pop(item *LRUNode5) { - item.prev.next = item.next - item.next.prev = item.prev -} - -func (c *LRUCache5) push(item *LRUNode5) { - c.head.next.prev = item - item.next = c.head.next - item.prev = c.head - c.head.next = item -} - -func (c *LRUCache5) evict() *LRUNode5 { - item := c.tail.prev - c.pop(item) - delete(c.cache, item.key) - return 
item -} - -func (c *LRUCache5) Get(key string) bool { - if c.cache == nil { - c.init() - } - item := c.cache[key] - if item == nil { - return false - } - if c.head.next != item { - c.pop(item) - c.push(item) - } - return true -} - -func (c *LRUCache5) Put(key string) { - if c.cache == nil { - c.init() - } - item := c.cache[key] - if item == nil { - if len(c.cache) == c.capacity { - item = c.evict() - } else { - item = new(LRUNode5) - } - item.key = key - c.push(item) - c.cache[key] = item - } else { - if c.head.next != item { - c.pop(item) - c.push(item) - } - } -} - -func (c *LRUCache5) Clear() { - if c.cache != nil { - - for elem := range c.cache { - delete(c.cache, elem) - } - - c.head = nil - c.tail = nil - } -} - -func (c *LRUCache5) Dump() { - if c.cache != nil { - - for elem := range c.cache { - fmt.Println(elem) - } - - } -} diff --git a/tools/tsdb/bloom-tester/lrucache_test.go b/tools/tsdb/bloom-tester/lrucache_test.go deleted file mode 100644 index dec5f85688664..0000000000000 --- a/tools/tsdb/bloom-tester/lrucache_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package main - -import ( - "encoding/binary" - "strconv" - "testing" - - "github.com/stretchr/testify/require" -) - -var num = 1000000 - -func BenchmarkLRU1Put(b *testing.B) { - cache := NewLRUCache(num) - for i := 0; i < b.N; i++ { - cache.Put(strconv.Itoa(i)) - } -} - -func BenchmarkLRU1Get(b *testing.B) { - cache := NewLRUCache(num) - for i := 0; i < num; i++ { - cache.Put(strconv.Itoa(i)) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Get(strconv.Itoa(i)) - } -} - -func BenchmarkLRU2Put(b *testing.B) { - cache := NewLRUCache2(num) - for i := 0; i < b.N; i++ { - cache.Put(strconv.Itoa(i)) - } -} - -func BenchmarkLRU2Get(b *testing.B) { - cache := NewLRUCache2(num) - for i := 0; i < num; i++ { - cache.Put(strconv.Itoa(i)) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Get(strconv.Itoa(i)) - } -} - -func BenchmarkLRU4Put(b *testing.B) { - cache := NewLRUCache4(num) - for i := 0; i < b.N; i++ { - cache.Put([]byte(strconv.Itoa(i))) - } -} - -func BenchmarkLRU4Get(b *testing.B) { - cache := NewLRUCache4(num) - for i := 0; i < num; i++ { - cache.Put([]byte(strconv.Itoa(i))) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Get([]byte(strconv.Itoa(i))) - } -} - -func TestByteSet(t *testing.T) { - set := NewByteSet(30) - set.Put("fooa") - set.PutBytes([]byte("foob")) - for _, tc := range []struct { - desc string - input string - exp bool - }{ - { - desc: "test string put", - input: "fooa", - exp: true, - }, - { - desc: "test byte put", - input: "foob", - exp: true, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, set.Get(tc.input)) - }) - } -} - -func TestByteKeyLRUCache(t *testing.T) { - set := NewByteKeyLRUCache(30) - set.Put(NewFourByteKeyFromSlice([]byte("fooa"))) - //set.PutBytes([]byte("foob")) - for _, tc := range []struct { - desc string - input string - exp bool - }{ - { - desc: "test valid", - input: "fooa", - exp: true, - }, - { - desc: "test not valid", - input: "foob", - exp: false, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, tc.exp, set.Get(NewFourByteKeyFromSlice([]byte(tc.input)))) - }) - } -} - -func TestLRUCache5(t *testing.T) { - set := NewLRUCache5(30) - set.Put("fooa") - for _, tc := range []struct { - desc string - input string - exp bool - }{ - { - desc: "test valid", - input: "fooa", - exp: true, - }, - { - desc: "test not valid", - input: "foob", - exp: false, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - require.Equal(t, 
tc.exp, set.Get(tc.input)) - }) - } -} - -func BenchmarkLRU5Put(b *testing.B) { - cache := NewLRUCache5(num) - for i := 0; i < b.N; i++ { - cache.Put(strconv.Itoa(i)) - } -} - -func BenchmarkLRU5Get(b *testing.B) { - cache := NewLRUCache5(num) - for i := 0; i < num; i++ { - cache.Put(strconv.Itoa(i)) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Get(strconv.Itoa(i)) - } -} - -func BenchmarkByteKeyLRUCacheSet(b *testing.B) { - buf := make([]byte, 26) - cache := NewByteKeyLRUCache(num) - for i := 0; i < b.N; i++ { - binary.LittleEndian.PutUint64(buf, uint64(i)) - - cache.Put(NewThirtyOneByteKeyFromSlice(buf)) - } -} - -func BenchmarkByteKeyLRUCacheGet(b *testing.B) { - buf := make([]byte, 26) - - cache := NewByteKeyLRUCache(num) - for i := 0; i < b.N; i++ { - binary.LittleEndian.PutUint64(buf, uint64(i)) - - cache.Put(NewThirtyOneByteKeyFromSlice(buf)) - //cache.Put(NewTwentySixByteKeyFromSlice([]byte(strconv.Itoa(i)))) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - binary.LittleEndian.PutUint64(buf, uint64(i)) - - cache.Get(NewThirtyOneByteKeyFromSlice(buf)) - //cache.Get(NewTwentySixByteKeyFromSlice([]byte(strconv.Itoa(i)))) - } -} - -func BenchmarkByteSetPut(b *testing.B) { - cache := NewByteSet(num) - for i := 0; i < b.N; i++ { - cache.Put(strconv.Itoa(i)) - } -} - -func BenchmarkByteSetGet(b *testing.B) { - cache := NewByteSet(num) - for i := 0; i < b.N; i++ { - cache.Put(strconv.Itoa(i)) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Get(strconv.Itoa(i)) - } -} diff --git a/tools/tsdb/bloom-tester/main.go b/tools/tsdb/bloom-tester/main.go deleted file mode 100644 index ab5b9dfbcf1c1..0000000000000 --- a/tools/tsdb/bloom-tester/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "github.com/go-kit/log/level" - - util_log "github.com/grafana/loki/v3/pkg/util/log" -) - -// go build ./tools/tsdb/bloom-tester && HOSTNAME="bloom-tester-121" NUM_TESTERS="128" BUCKET="19625" DIR=/Users/progers/dev/bloom WRITE_MODE="false" BUCKET_PREFIX="new-experiments" ./tools/tsdb/bloom-tester/bloom-tester --config.file=/Users/progers/dev/bloom/config.yaml -func main() { - writeMode := os.Getenv("WRITE_MODE") - - if strings.EqualFold(writeMode, "true") { - fmt.Println("write mode") - level.Info(util_log.Logger).Log("msg", "starting up in write mode") - //time.Sleep(3000 * time.Second) - execute() - } else { - fmt.Println("read mode") - level.Info(util_log.Logger).Log("msg", "starting up in read mode") - //time.Sleep(3000 * time.Second) - - executeRead() - } -} diff --git a/tools/tsdb/bloom-tester/metrics.go b/tools/tsdb/bloom-tester/metrics.go deleted file mode 100644 index 3eea766b95f51..0000000000000 --- a/tools/tsdb/bloom-tester/metrics.go +++ /dev/null @@ -1,216 +0,0 @@ -package main - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" - - "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" -) - -type Experiment struct { - name string - tokenizer *v1.NGramTokenizer - bloom func() *filter.ScalableBloomFilter - encodeChunkID bool -} - -func NewExperiment(name string, tokenizer *v1.NGramTokenizer, encodeChunkID bool, bloom func() *filter.ScalableBloomFilter) Experiment { - return Experiment{ - name: name, - tokenizer: tokenizer, - bloom: bloom, - encodeChunkID: encodeChunkID, - } -} - -type QueryExperiment struct { - name string - searchString string -} - -func NewQueryExperiment(name string, searchString 
string) QueryExperiment { - return QueryExperiment{name: name, - searchString: searchString} -} - -const ExperimentLabel = "experiment" -const QueryExperimentLabel = "query_experiment" -const LookupResultType = "lookup_result_type" -const FalsePositive = "false_positive" -const FalseNegative = "false_negative" -const TruePositive = "true_positive" -const TrueNegative = "true_negative" - -type Metrics struct { - tenants prometheus.Counter - readTenants prometheus.Counter - series prometheus.Counter // number of series - readSeries prometheus.Counter // number of series - seriesKept prometheus.Counter // number of series kept - readSeriesKept prometheus.Counter // number of series kept - - chunks prometheus.Counter // number of chunks - readChunks prometheus.Counter // number of chunks - chunksKept prometheus.Counter // number of chunks kept - readChunksKept prometheus.Counter // number of chunks kept - chunksPerSeries prometheus.Histogram // number of chunks per series - chunkSize prometheus.Histogram // uncompressed size of all chunks summed per series - readChunkSize prometheus.Histogram // uncompressed size of all chunks summed per series - - lines *prometheus.CounterVec // number of lines processed per experiment (should be the same) - inserts *prometheus.CounterVec // number of inserts attempted into bloom filters - collisions *prometheus.CounterVec // number of inserts that collided with existing keys - - hammingWeightRatio *prometheus.HistogramVec // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter - estimatedCount *prometheus.HistogramVec // estimated number of elements in the bloom filter - estimatedErrorRate *prometheus.HistogramVec // estimated error rate of the bloom filter - bloomSize *prometheus.HistogramVec // size of the bloom filter in bytes - readBloomSize *prometheus.HistogramVec // size of the bloom filter in bytes - - totalChunkMatchesPerSeries *prometheus.CounterVec // total number of matches for a given string, iterating over all lines in a chunk - chunkMatchesPerSeries *prometheus.CounterVec // number of matches for a given string in a chunk - sbfMatchesPerSeries *prometheus.CounterVec // number of matches for a given string, using the bloom filter - missesPerSeries *prometheus.CounterVec // number of cases where the bloom filter did not have a match, but the chunks contained the string (should be zero) - //counterPerSeries *prometheus.CounterVec // number of matches for a given string - sbfCount prometheus.Counter // number of chunks - experimentCount prometheus.Counter // number of experiments performed - - sbfLookups *prometheus.CounterVec - sbfCreationTime *prometheus.CounterVec // time spent creating sbfs - sbfsCreated *prometheus.CounterVec // number of sbfs created -} - -func NewMetrics(r prometheus.Registerer) *Metrics { - return &Metrics{ - tenants: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_tenants", - Help: "Number of tenants", - }), - readTenants: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_tenants_read", - Help: "Number of tenants", - }), - series: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_series", - Help: "Number of series", - }), - readSeries: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_series_read", - Help: "Number of series", - }), - seriesKept: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_series_kept", - Help: "Number of series kept", - }), - readSeriesKept: 
promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_series_kept_read", - Help: "Number of series kept", - }), - chunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_chunks", - Help: "Number of chunks", - }), - readChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_chunks_read", - Help: "Number of chunks", - }), - chunksKept: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_chunks_kept", - Help: "Number of chunks kept", - }), - readChunksKept: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_chunks_kept_read", - Help: "Number of chunks kept", - }), - sbfCount: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "bloom_files_found", - Help: "Number of bloom files processed", - }), - experimentCount: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "num_experiments", - Help: "Number of experiments performed", - }), - chunksPerSeries: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "bloom_chunks_per_series", - Help: "Number of chunks per series", - Buckets: prometheus.ExponentialBucketsRange(1, 10000, 12), - }), - chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "bloom_chunk_series_size", - Help: "Uncompressed size of chunks in a series", - Buckets: prometheus.ExponentialBucketsRange(1<<10, 1<<30, 10), - }), - readChunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "bloom_chunk_series_size_read", - Help: "Uncompressed size of chunks in a series", - Buckets: prometheus.ExponentialBucketsRange(1<<10, 1<<30, 10), - }), - lines: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "bloom_lines", - Help: "Number of lines processed", - }, []string{ExperimentLabel}), - inserts: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "bloom_inserts", - Help: "Number of inserts attempted into bloom filters", - }, []string{ExperimentLabel}), - collisions: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "bloom_collisions", - Help: "Number of inserts that collided with existing keys", - }, []string{ExperimentLabel}), - hammingWeightRatio: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ - Name: "bloom_hamming_weight_ratio", - Help: "Ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter", - Buckets: prometheus.ExponentialBucketsRange(0.001, 1, 12), - }, []string{ExperimentLabel}), - estimatedCount: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ - Name: "bloom_estimated_count", - Help: "Estimated number of elements in the bloom filter", - Buckets: prometheus.ExponentialBucketsRange(1, 32<<20, 10), - }, []string{ExperimentLabel}), - estimatedErrorRate: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ - Name: "bloom_estimated_error_rate", - Help: "Estimated error rate of the bloom filter", - Buckets: prometheus.ExponentialBucketsRange(0.0001, 0.5, 10), - }, []string{ExperimentLabel}), - bloomSize: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ - Name: "bloom_size", - Help: "Size of the bloom filter in bytes", - Buckets: prometheus.ExponentialBucketsRange(128, 16<<20, 8), - }, []string{ExperimentLabel}), - readBloomSize: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ - Name: "bloom_size_read", - Help: "Size of the bloom filter in bytes", - Buckets: prometheus.ExponentialBucketsRange(128, 16<<20, 8), - }, []string{ExperimentLabel}), - chunkMatchesPerSeries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ 
- Name: "chunk_matches_per_series", - Help: "Number of chunk matches per series", - }, []string{ExperimentLabel, QueryExperimentLabel}), - totalChunkMatchesPerSeries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "total_chunk_matches_per_series", - Help: "Number of total chunk matches per series", - }, []string{ExperimentLabel, QueryExperimentLabel}), - sbfMatchesPerSeries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "sbf_matches_per_series", - Help: "Number of sbf matches per series", - }, []string{ExperimentLabel, QueryExperimentLabel}), - missesPerSeries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "sbf_misses_per_series", - Help: "Number of sbf misses per series", - }, []string{ExperimentLabel, QueryExperimentLabel}), - sbfLookups: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "sbf_lookups", - Help: "sbf lookup results", - }, []string{ExperimentLabel, QueryExperimentLabel, LookupResultType}), - sbfCreationTime: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "bloom_creation_time", - Help: "Time spent creating sbfs", - }, []string{ExperimentLabel}), - sbfsCreated: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "blooms_created", - Help: "number of sbfs created", - }, []string{ExperimentLabel}), - } -} diff --git a/tools/tsdb/bloom-tester/readlib.go b/tools/tsdb/bloom-tester/readlib.go deleted file mode 100644 index 3001b12554051..0000000000000 --- a/tools/tsdb/bloom-tester/readlib.go +++ /dev/null @@ -1,324 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - - "github.com/grafana/dskit/services" - - "github.com/grafana/loki/v3/pkg/chunkenc" - "github.com/grafana/loki/v3/pkg/logproto" - "github.com/grafana/loki/v3/pkg/logql/log" - bt "github.com/grafana/loki/v3/pkg/storage/bloom/v1" - "github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter" - "github.com/grafana/loki/v3/pkg/storage/chunk" - tsdbindex "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" - - "math" - "os" - "strconv" - "strings" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/log/level" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - - "github.com/grafana/loki/v3/pkg/storage" - "github.com/grafana/loki/v3/pkg/storage/chunk/client" - - //indexshipper_index "github.com/grafana/loki/pkg/storage/stores/indexshipper/index" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper" - shipperindex "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/index" - "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" - - //"github.com/grafana/loki/pkg/storage/stores/tsdb" - //"github.com/grafana/loki/pkg/storage/stores/tsdb/index" - util_log "github.com/grafana/loki/v3/pkg/util/log" - "github.com/grafana/loki/v3/tools/tsdb/helpers" -) - -var queryExperiments = []QueryExperiment{ - //NewQueryExperiment("three_char_word", "tra"), - NewQueryExperiment("four_char_word", "trac"), - NewQueryExperiment("five_char_word", "trace"), - //NewQueryExperiment("level", "level"), - //NewQueryExperiment("level=", "level="), - - NewQueryExperiment("six_char_word", "traceI"), - NewQueryExperiment("seven_char_word", "traceID"), - NewQueryExperiment("uuid", "2b1a5e46-36a2-4694-a4b1-f34cc7bdfc45"), - NewQueryExperiment("longer_string_that_exists", "synthetic-monitoring-agent"), - //NewQueryExperiment("longer_string_that_doesnt_exist", "abcdefghjiklmnopqrstuvwxyzzy1234567890"), -} - -func 
executeRead() { - conf, svc, bucket, err := helpers.Setup() - helpers.ExitErr("setting up", err) - - _, overrides, clientMetrics := helpers.DefaultConfigs() - - flag.Parse() - - periodCfg, tableRange, tableName, err := helpers.GetPeriodConfigForTableNumber(bucket, conf.SchemaConfig.Configs) - helpers.ExitErr("find period config for bucket", err) - - objectClient, err := storage.NewObjectClient(periodCfg.ObjectType, conf.StorageConfig, clientMetrics) - helpers.ExitErr("creating object client", err) - - chunkClient := client.NewClient(objectClient, nil, conf.SchemaConfig) - - openFn := func(p string) (shipperindex.Index, error) { - return tsdb.OpenShippableTSDB(p) - } - - indexShipper, err := indexshipper.NewIndexShipper( - periodCfg.IndexTables.PathPrefix, - conf.StorageConfig.TSDBShipperConfig, - objectClient, - overrides, - nil, - openFn, - tableRange, - prometheus.WrapRegistererWithPrefix("loki_tsdb_shipper_", prometheus.DefaultRegisterer), - util_log.Logger, - ) - helpers.ExitErr("creating index shipper", err) - - tenants, err := helpers.ResolveTenants(objectClient, periodCfg.IndexTables.PathPrefix, tableName) - level.Info(util_log.Logger).Log("tenants", strings.Join(tenants, ","), "table", tableName) - helpers.ExitErr("resolving tenants", err) - - sampler, err := NewProbabilisticSampler(1.000) - helpers.ExitErr("creating sampler", err) - - metrics := NewMetrics(prometheus.DefaultRegisterer) - - level.Info(util_log.Logger).Log("msg", "starting server") - err = services.StartAndAwaitRunning(context.Background(), svc) - helpers.ExitErr("waiting for service to start", err) - level.Info(util_log.Logger).Log("msg", "server started") - - err = analyzeRead(metrics, sampler, indexShipper, chunkClient, tableName, tenants, objectClient) - helpers.ExitErr("analyzing", err) -} - -func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexShipper, client client.Client, tableName string, tenants []string, objectClient client.ObjectClient) error { - metrics.readTenants.Add(float64(len(tenants))) - - testerNumber := extractTesterNumber(os.Getenv("HOSTNAME")) - if testerNumber == -1 { - helpers.ExitErr("extracting hostname index number", nil) - } - numTesters, _ := strconv.Atoi(os.Getenv("NUM_TESTERS")) - if numTesters == -1 { - helpers.ExitErr("extracting total number of testers", nil) - } - level.Info(util_log.Logger).Log("msg", "starting analyze()", "tester", testerNumber, "total", numTesters) - - // var n int // count iterated series - // reportEvery := 10 // report every n chunks - // pool := newPool(runtime.NumCPU()) - // pool := newPool(16) - // searchString := os.Getenv("SEARCH_STRING") - // 147854,148226,145541,145603,147159,147836,145551,145599,147393,147841,145265,145620,146181,147225,147167,146131,146189,146739,147510,145572,146710,148031,29,146205,147175,146984,147345 - // mytenants := []string{"29"} - bloomTokenizer, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip) - for _, tenant := range tenants { - level.Info(util_log.Logger).Log("Analyzing tenant", tenant, "table", tableName) - err := shipper.ForEach( - context.Background(), - tableName, - tenant, - func(isMultiTenantIndex bool, idx shipperindex.Index) error { - if isMultiTenantIndex { - return nil - } - - casted := idx.(*tsdb.TSDBFile).Index.(*tsdb.TSDBIndex) - _ = casted.ForSeries( - context.Background(), - "", nil, - model.Earliest, model.Latest, - func(ls labels.Labels, fp model.Fingerprint, chks []tsdbindex.ChunkMeta) (stop bool) { - seriesString := ls.String() - 
-func analyzeRead(metrics *Metrics, sampler Sampler, shipper indexshipper.IndexShipper, client client.Client, tableName string, tenants []string, objectClient client.ObjectClient) error {
-	metrics.readTenants.Add(float64(len(tenants)))
-
-	testerNumber := extractTesterNumber(os.Getenv("HOSTNAME"))
-	if testerNumber == -1 {
-		helpers.ExitErr("extracting hostname index number", fmt.Errorf("HOSTNAME %q does not end in an index", os.Getenv("HOSTNAME")))
-	}
-	numTesters, err := strconv.Atoi(os.Getenv("NUM_TESTERS"))
-	if err != nil || numTesters < 1 {
-		helpers.ExitErr("extracting total number of testers", fmt.Errorf("NUM_TESTERS must be a positive integer, got %q", os.Getenv("NUM_TESTERS")))
-	}
-	level.Info(util_log.Logger).Log("msg", "starting analyze()", "tester", testerNumber, "total", numTesters)
-
-	// var n int // count iterated series
-	// reportEvery := 10 // report every n chunks
-	// pool := newPool(runtime.NumCPU())
-	// pool := newPool(16)
-	// searchString := os.Getenv("SEARCH_STRING")
-	// 147854,148226,145541,145603,147159,147836,145551,145599,147393,147841,145265,145620,146181,147225,147167,146131,146189,146739,147510,145572,146710,148031,29,146205,147175,146984,147345
-	// mytenants := []string{"29"}
-	bloomTokenizer, _ := NewBloomTokenizer(prometheus.DefaultRegisterer, DefaultNGramLength, DefaultNGramSkip)
-	for _, tenant := range tenants {
-		level.Info(util_log.Logger).Log("msg", "analyzing tenant", "tenant", tenant, "table", tableName)
-		err := shipper.ForEach(
-			context.Background(),
-			tableName,
-			tenant,
-			func(isMultiTenantIndex bool, idx shipperindex.Index) error {
-				if isMultiTenantIndex {
-					return nil
-				}
-
-				casted := idx.(*tsdb.TSDBFile).Index.(*tsdb.TSDBIndex)
-				_ = casted.ForSeries(
-					context.Background(),
-					"", nil,
-					model.Earliest, model.Latest,
-					func(ls labels.Labels, fp model.Fingerprint, chks []tsdbindex.ChunkMeta) (stop bool) {
-						seriesString := ls.String()
-						seriesStringHash := FNV32a(seriesString)
-						pos, _ := strconv.Atoi(seriesStringHash)
-
-						workernumber := AssignToWorker(pos, numTesters)
-
-						if (workernumber == testerNumber) && (len(chks) < 10000) { // For every series
-							/*
-								pool.acquire(
-									ls.Copy(),
-									fp,
-									chksCpy,
-									func(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) {*/
-
-							metrics.readSeries.Inc()
-							metrics.readChunks.Add(float64(len(chks)))
-
-							if !sampler.Sample() {
-								return
-							}
-
-							transformed := make([]chunk.Chunk, 0, len(chks))
-							for _, chk := range chks {
-								transformed = append(transformed, chunk.Chunk{
-									ChunkRef: logproto.ChunkRef{
-										Fingerprint: uint64(fp),
-										UserID:      tenant,
-										From:        chk.From(),
-										Through:     chk.Through(),
-										Checksum:    chk.Checksum,
-									},
-								})
-							}
-
-							got, err := client.GetChunks(
-								context.Background(),
-								transformed,
-							)
-							if err == nil {
-								bucketPrefix := os.Getenv("BUCKET_PREFIX")
-								if strings.EqualFold(bucketPrefix, "") {
-									bucketPrefix = "named-experiments-"
-								}
-								for _, experiment := range experiments { // for each experiment
-									if sbfFileExists("bloomtests",
-										fmt.Sprint(bucketPrefix, experiment.name),
-										os.Getenv("BUCKET"),
-										tenant,
-										ls.String(),
-										objectClient) {
-
-										sbf := readSBFFromObjectStorage("bloomtests",
-											fmt.Sprint(bucketPrefix, experiment.name),
-											os.Getenv("BUCKET"),
-											tenant,
-											ls.String(),
-											objectClient)
-										bloomTokenizer.SetLineTokenizer(experiment.tokenizer)
-										for gotIdx := range got { // for every chunk
-											for _, queryExperiment := range queryExperiments { // for each search string
-												if len(queryExperiment.searchString) >= experiment.tokenizer.N()+experiment.tokenizer.SkipFactor() {
-
-													foundInChunk := false
-													foundInSbf := searchSbf(sbf, *experiment.tokenizer, queryExperiment.searchString)
-
-													lc := got[gotIdx].Data.(*chunkenc.Facade).LokiChunk()
-
-													itr, err := lc.Iterator(
-														context.Background(),
-														time.Unix(0, 0),
-														time.Unix(0, math.MaxInt64),
-														logproto.FORWARD,
-														log.NewNoopPipeline().ForStream(ls),
-													)
-													helpers.ExitErr("getting iterator", err)
-
-													for itr.Next() && itr.Error() == nil {
-														if strings.Contains(itr.Entry().Line, queryExperiment.searchString) {
-															foundInChunk = true
-														}
-													}
-
-													if foundInChunk {
-														if foundInSbf {
-															metrics.sbfLookups.WithLabelValues(experiment.name, queryExperiment.name, TruePositive).Inc()
-														} else {
-															metrics.sbfLookups.WithLabelValues(experiment.name, queryExperiment.name, FalseNegative).Inc()
-														}
-													} else {
-														if foundInSbf {
-															metrics.sbfLookups.WithLabelValues(experiment.name, queryExperiment.name, FalsePositive).Inc()
-														} else {
-															metrics.sbfLookups.WithLabelValues(experiment.name, queryExperiment.name, TrueNegative).Inc()
-														}
-													}
-
-													metrics.experimentCount.Inc()
-
-													helpers.ExitErr("iterating chunks ", itr.Error())
-												}
-											} // for each search string
-										} // for every chunk
-
-										metrics.sbfCount.Inc()
-										metrics.readBloomSize.WithLabelValues(experiment.name).Observe(float64(sbf.Capacity() / 8))
-
-									} // for existing sbf files
-								} // for every experiment
-
-							} else {
-								level.Error(util_log.Logger).Log("msg", "error getting chunks", "err", err)
-							}
-							if len(got) > 0 { // we have chunks, record size info
-								var chunkTotalUncompressedSize int
-								for _, c := range got {
-									chunkTotalUncompressedSize += c.Data.(*chunkenc.Facade).LokiChunk().UncompressedSize()
-								}
-								metrics.readChunkSize.Observe(float64(chunkTotalUncompressedSize))
-								metrics.readChunksKept.Add(float64(len(chks)))
-							}
-
-							metrics.readSeriesKept.Inc()
-							/*
-								},
-								)
-							*/
-						} // For every series
-						return false
-					},
-					labels.MustNewMatcher(labels.MatchEqual, "", ""),
-				)
-
-				return nil
-			},
-		)
-		helpers.ExitErr(fmt.Sprintf("iterating tenant %s", tenant), err)
-	}
-
-	level.Info(util_log.Logger).Log("msg", "waiting for workers to finish")
-	//pool.drain() // wait for workers to finish
-	level.Info(util_log.Logger).Log("msg", "waiting for final scrape")
-	//time.Sleep(30 * time.Second) // allow final scrape
-	time.Sleep(time.Duration(1<<63 - 1)) // wait forever
-	return nil
-}
-
-func readSBFFromObjectStorage(location, prefix, period, tenant, series string, objectClient client.ObjectClient) *filter.ScalableBloomFilter {
-	objectStoragePath := fmt.Sprintf("%s/%s/%s/%s", location, prefix, period, tenant)
-
-	sbf := experiments[0].bloom()
-	// The caller has already verified the object exists via sbfFileExists, so
-	// read errors are deliberately ignored in this test tool.
-	closer, _, _ := objectClient.GetObject(context.Background(), fmt.Sprintf("%s/%s", objectStoragePath, FNV32a(series)))
-	_, _ = sbf.ReadFrom(closer)
-	return sbf
-}
-
-func searchSbf(sbf *filter.ScalableBloomFilter, tokenizer bt.NGramTokenizer, searchString string) bool {
-	itr := tokenizer.Tokens(searchString)
-	numMatches := 0
-	numTokens := 0
-	for itr.Next() {
-		token := itr.At()
-		numTokens++
-		if sbf.Test(token) {
-			numMatches++
-		}
-	}
-
-	// The needle is considered present only if every one of its n-gram tokens hits the filter.
-	return numTokens > 0 && numMatches == numTokens
-}
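`searchSbf` above treats a needle as present only when every n-gram it produces hits the filter; a single missing token rules the needle out definitively, while an all-token hit may still be a false positive. A toy model of that all-tokens-must-match semantics, with a plain map standing in for the scalable bloom filter:

```go
package main

import "fmt"

// containsAll reports whether every token hits the "filter". A real bloom
// filter can additionally return false positives, which the tester measures.
func containsAll(filter map[string]bool, tokens []string) bool {
	if len(tokens) == 0 {
		return false
	}
	for _, tok := range tokens {
		if !filter[tok] {
			return false
		}
	}
	return true
}

func main() {
	filter := map[string]bool{"trac": true, "race": true}
	fmt.Println(containsAll(filter, []string{"trac", "race"})) // true: "trace" may be present
	fmt.Println(containsAll(filter, []string{"race", "aceI"})) // false: "raceI" is definitely absent
}
```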
-		{
-			desc:        "realistic",
-			inputLine:   "(Use *node --trace-deprecation. to show where the warning was created)",
-			inputSearch: "trace",
-			exp:         true,
-		},
-		{
-			desc:        "realistic load balancer log",
-			inputLine:   "level=info ts=2023-10-13T20:03:48.064432622Z caller=readlib.go:280 ****falsenegativeline:=\"{\\\"httpRequest\\\":{\\\"latency\\\":\\\"0.084279s\\\",\\\"remoteIp\\\":\\\"130.211.209.64\\\",\\\"requestMethod\\\":\\\"POST\\\",\\\"requestSize\\\":\\\"151\\\",\\\"requestUrl\\\":\\\"https://prometheus-dev-01-dev-us-central-0.grafana-dev.net/api/prom/api/v1/query\\\",\\\"responseSize\\\":\\\"139\\\",\\\"serverIp\\\":\\\"10.132.64.43\\\",\\\"status\\\":200,\\\"userAgent\\\":\\\"usage_service \\\"},\\\"insertId\\\":\\\"1r9sgvff4fqw78\\\",\\\"jsonPayload\\\":{\\\"@type\\\":\\\"type.googleapis.com/google.cloud.loadbalancing.type.LoadBalancerLogEntry\\\",\\\"backendTargetProjectNumber\\\":\\\"projects/1040409107725\\\",\\\"cacheDecision\\\":[\\\"RESPONSE_HAS_CONTENT_TYPE\\\",\\\"REQUEST_HAS_AUTHORIZATION\\\",\\\"CACHE_MODE_USE_ORIGIN_HEADERS\\\"],\\\"remoteIp\\\":\\\"130.211.209.64\\\",\\\"statusDetails\\\":\\\"response_sent_by_backend\\\"},\\\"logName\\\":\\\"projects/grafanalabs-dev/logs/requests\\\",\\\"receiveTimestamp\\\":\\\"2023-09-25T20:15:17.305415664Z\\\",\\\"resource\\\":{\\\"labels\\\":{\\\"backend_service_name\\\":\\\"k8s1-bb201ea5-cortex-de-prometheus-dev-01-dev-us-cen-8-d92f8647\\\",\\\"forwarding_rule_name\\\":\\\"k8s2-fs-i7ga9yyz-cortex-de-prometheus-dev-01-dev-us-ce-5fyxhcia\\\",\\\"project_id\\\":\\\"grafanalabs-dev\\\",\\\"target_proxy_name\\\":\\\"k8s2-ts-i7ga9yyz-cortex-de-prometheus-dev-01-dev-us-ce-5fyxhcia\\\",\\\"url_map_name\\\":\\\"k8s2-um-i7ga9yyz-cortex-de-prometheus-dev-01-dev-us-ce-5fyxhcia\\\",\\\"zone\\\":\\\"global\\\"},\\\"type\\\":\\\"http_load_balancer\\\"},\\\"severity\\\":\\\"INFO\\\",\\\"spanId\\\":\\\"d2c261eca4d5a01a\\\",\\\"timestamp\\\":\\\"2023-09-25T20:15:16.522437Z\\\",\\\"trace\\\":\\\"projects/grafanalabs-dev/traces/0a178fae3e96ed27aaf81a4a268730c2\\\"}\"",
-			inputSearch: "trace",
-			exp:         true,
-		},
-	} {
-		t.Run(tc.desc, func(t *testing.T) {
-			sbf := experiment.bloom()
-			tokens := four.Tokens(tc.inputLine)
-			for tokens.Next() {
-				tok := tokens.At()
-				sbf.Add(tok)
-			}
-
-			require.Equal(t, tc.exp, searchSbf(sbf, *four, tc.inputSearch))
-		})
-	}
-}
diff --git a/tools/tsdb/bloom-tester/sampler.go b/tools/tsdb/bloom-tester/sampler.go
deleted file mode 100644
index ae47bdc298207..0000000000000
--- a/tools/tsdb/bloom-tester/sampler.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package main
-
-import (
-	"errors"
-	"math/rand"
-)
-
-type Sampler interface {
-	Sample() bool
-}
-
-func NewProbabilisticSampler(p float64) (*ProbabilisticSampler, error) {
-	if p < 0 || p > 1 {
-		return nil, errors.New("invalid probability, must be between 0 and 1")
-	}
-
-	return &ProbabilisticSampler{
-		p:   p,
-		rng: rand.New(rand.NewSource(0)), // always use deterministic seed so identical instantiations sample the same way
-	}, nil
-}
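Because the source is seeded with a constant, two samplers constructed with the same probability admit exactly the same sequence of decisions, which keeps results reproducible across runs. A minimal usage sketch (the loop and counts are illustrative only):

```go
package main

import "fmt"

func main() {
	sampler, err := NewProbabilisticSampler(0.25) // keep roughly a quarter of series
	if err != nil {
		panic(err)
	}

	kept := 0
	for i := 0; i < 1000; i++ {
		if sampler.Sample() {
			kept++
		}
	}
	// Deterministic: the same count on every run, approximately 250.
	fmt.Println("kept", kept, "of 1000")
}
```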
-// ProbabilisticSampler is a probabilistic sampler.
-type ProbabilisticSampler struct {
-	p   float64
-	rng *rand.Rand
-}
-
-func (s *ProbabilisticSampler) Sample() bool {
-	scale := 1e6
-	x := s.rng.Intn(int(scale))
-	return float64(x) < s.p*scale
-}
diff --git a/tools/tsdb/bloom-tester/tokenizer.go b/tools/tsdb/bloom-tester/tokenizer.go
deleted file mode 100644
index ac52c3de623e0..0000000000000
--- a/tools/tsdb/bloom-tester/tokenizer.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package main
-
-import (
-	"context"
-	"math"
-	"time"
-
-	v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
-
-	"github.com/prometheus/client_golang/prometheus/promauto"
-
-	"github.com/grafana/loki/v3/pkg/util/constants"
-
-	"github.com/go-kit/log/level"
-	"github.com/prometheus/client_golang/prometheus"
-
-	"github.com/grafana/loki/v3/pkg/chunkenc"
-	"github.com/grafana/loki/v3/pkg/logproto"
-	"github.com/grafana/loki/v3/pkg/logql/log"
-
-	"github.com/grafana/loki/v3/pkg/storage/chunk"
-	"github.com/grafana/loki/v3/pkg/util/encoding"
-	util_log "github.com/grafana/loki/v3/pkg/util/log"
-)
-
-type metrics struct {
-	sbfCreationTime    prometheus.Counter   // time spent creating sbfs
-	chunkSize          prometheus.Histogram // uncompressed size of all chunks summed per series
-	bloomSize          prometheus.Histogram // size of the bloom filter in bytes
-	hammingWeightRatio prometheus.Histogram // ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter
-	estimatedCount     prometheus.Histogram // estimated number of elements in the bloom filter
-}
-
-/*
-BloomTokenizer is a utility that converts either Loki chunks or individual lines into tokens.
-These tokens are n-grams of adjacent characters, which are used to populate a bloom filter.
-https://en.wikipedia.org/wiki/Bloom_filter
-Bloom filters are used for faster lookups of log lines.
-*/
-type BloomTokenizer struct {
-	metrics *metrics
-
-	lineTokenizer *v1.NGramTokenizer
-	cache         map[string]interface{}
-}
-
-const cacheSize = 150000
-const bloomTokenizerMetricsSubsystem = "bloom_tokenizer"
-const eightBits = 8
-
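With the defaults used by this tool (n-gram length 4, skip 0), every run of four consecutive characters in a line becomes a token. The toy tokenizer below mirrors the behavior the tool relies on from `v1.NewNGramTokenizer`, but is not that package's code:

```go
package main

import "fmt"

// ngrams emits every run of n consecutive runes, advancing skip+1 runes
// between emissions. A toy model of the v1 tokenizer's behavior.
func ngrams(s string, n, skip int) []string {
	runes := []rune(s)
	var out []string
	for i := 0; i+n <= len(runes); i += skip + 1 {
		out = append(out, string(runes[i:i+n]))
	}
	return out
}

func main() {
	// With n=4 and skip=0, "trace" yields two tokens.
	fmt.Println(ngrams("trace", 4, 0)) // [trac race]
}
```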
-// NewBloomTokenizer returns a new instance of the Bloom Tokenizer.
-// Warning: the tokens returned use the same byte slice to reduce allocations. This has three consequences:
-// 1) The token slices generated must not be mutated externally
-// 2) The token slice must not be used after the next call to `Tokens()` as it will repopulate the slice.
-// 3) This is not thread safe.
-func NewBloomTokenizer(reg prometheus.Registerer, NGramLength, NGramSkip int) (*BloomTokenizer, error) {
-	t := &BloomTokenizer{
-		metrics: newMetrics(reg, constants.Loki, bloomTokenizerMetricsSubsystem),
-	}
-	t.cache = make(map[string]interface{}, cacheSize)
-	t.lineTokenizer = v1.NewNGramTokenizer(NGramLength, NGramSkip)
-
-	level.Info(util_log.Logger).Log("msg", "bloom tokenizer created")
-
-	return t, nil
-}
-
-func (bt *BloomTokenizer) SetLineTokenizer(t *v1.NGramTokenizer) {
-	bt.lineTokenizer = t
-}
-
-func (bt *BloomTokenizer) GetNGramLength() uint64 {
-	return uint64(bt.lineTokenizer.N())
-}
-
-func (bt *BloomTokenizer) GetNGramSkip() uint64 {
-	return uint64(bt.lineTokenizer.SkipFactor())
-}
-
-func newMetrics(r prometheus.Registerer, namespace, subsystem string) *metrics {
-	return &metrics{
-		sbfCreationTime: promauto.With(r).NewCounter(prometheus.CounterOpts{
-			Name:      "bloom_creation_time",
-			Help:      "Time spent creating scalable bloom filters",
-			Namespace: namespace,
-			Subsystem: subsystem,
-		}),
-		chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-			Name:      "bloom_chunk_series_size",
-			Help:      "Uncompressed size of chunks in a series",
-			Buckets:   prometheus.ExponentialBucketsRange(1024, 1073741824, 10),
-			Namespace: namespace,
-			Subsystem: subsystem,
-		}),
-		bloomSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-			Name:      "bloom_size",
-			Help:      "Size of the bloom filter in bytes",
-			Buckets:   prometheus.ExponentialBucketsRange(128, 16777216, 8),
-			Namespace: namespace,
-			Subsystem: subsystem,
-		}),
-		hammingWeightRatio: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-			Name:      "bloom_hamming_weight_ratio",
-			Help:      "Ratio of the hamming weight of the bloom filter to the number of bits in the bloom filter",
-			Buckets:   prometheus.ExponentialBucketsRange(0.001, 1, 12),
-			Namespace: namespace,
-			Subsystem: subsystem,
-		}),
-		estimatedCount: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
-			Name:      "bloom_estimated_count",
-			Help:      "Estimated number of elements in the bloom filter",
-			Buckets:   prometheus.ExponentialBucketsRange(1, 33554432, 10),
-			Namespace: namespace,
-			Subsystem: subsystem,
-		}),
-	}
-}
-
-func clearCache(cache map[string]interface{}) {
-	clear(cache)
-}
-
-// prefixedToken returns a byte slice with sufficient capacity for a chunk-ref prefixed token
-// of specific ngram length, along with the length of the prefix.
-// It ensures enough capacity for the prefix and the token so additional tokens can be created
-// without allocations, by appending them after the prefix.
-func prefixedToken(ngram int, chk logproto.ChunkRef) ([]byte, int) {
-	var enc encoding.Encbuf
-	enc.PutBE64(uint64(chk.From))
-	enc.PutBE64(uint64(chk.Through))
-	enc.PutBE32(chk.Checksum)
-	prefixLn := enc.Len() // record the length of the prefix
-
-	enc.PutBytes(make([]byte, ngram*v1.MaxRuneLen)) // ensure enough capacity for the ngram
-
-	// return the underlying byte slice and the length of the prefix
-	return enc.Get(), prefixLn
-}
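The prefix built above is fixed-width: 8 bytes `From`, 8 bytes `Through`, and 4 bytes `Checksum`, 20 bytes in total, and each token is appended after byte 20 so a single buffer serves every token of a chunk. A self-contained illustration using `encoding/binary` in place of Loki's `Encbuf` (the timestamps and checksum are made up):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 0, 20+4) // 20-byte prefix plus room for one 4-gram of ASCII
	buf = binary.BigEndian.AppendUint64(buf, 1700000000000) // From (ms)
	buf = binary.BigEndian.AppendUint64(buf, 1700000360000) // Through (ms)
	buf = binary.BigEndian.AppendUint32(buf, 0xdeadbeef)    // Checksum
	prefixLn := len(buf)                                    // 20

	for _, tok := range []string{"trac", "race"} {
		// Reuse the buffer: truncate back to the prefix, then append the token.
		prefixed := append(buf[:prefixLn], tok...)
		fmt.Printf("len=%d prefix=% x token=%q\n", len(prefixed), prefixed[:prefixLn], prefixed[prefixLn:])
	}
}
```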
-// PopulateSeriesWithBloom is intended to be called on the write path, and is used to populate the bloom filter for a given series.
-func (bt *BloomTokenizer) PopulateSeriesWithBloom(seriesWithBloom *v1.SeriesWithBloom, chunks []chunk.Chunk) error {
-	startTime := time.Now().UnixMilli()
-
-	clearCache(bt.cache)
-	chunkTotalUncompressedSize := 0
-
-	for idx := range chunks {
-		lc := chunks[idx].Data.(*chunkenc.Facade).LokiChunk()
-		tokenBuf, prefixLn := prefixedToken(bt.lineTokenizer.N(), chunks[idx].ChunkRef)
-		chunkTotalUncompressedSize += lc.UncompressedSize()
-
-		itr, err := lc.Iterator(
-			context.Background(),
-			time.Unix(0, 0), // TODO: Parameterize/better handle the timestamps?
-			time.Unix(0, math.MaxInt64),
-			logproto.FORWARD,
-			log.NewNoopPipeline().ForStream(chunks[idx].Metric),
-		)
-		if err != nil {
-			level.Error(util_log.Logger).Log("msg", "chunk iterator cannot be created", "err", err)
-			return err
-		}
-
-		for itr.Next() && itr.Error() == nil {
-			chunkTokenizer := v1.NewPrefixedTokenIter(tokenBuf, prefixLn, bt.lineTokenizer.Tokens(itr.Entry().Line))
-			for chunkTokenizer.Next() {
-				tok := chunkTokenizer.At()
-				if tok != nil {
-					str := string(tok)
-					_, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters
-					if !found {
-						bt.cache[str] = nil
-
-						seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok)
-
-						if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other
-							clearCache(bt.cache)
-						}
-					}
-				}
-			}
-			lineTokenizer := bt.lineTokenizer.Tokens(itr.Entry().Line)
-			for lineTokenizer.Next() {
-				tok := lineTokenizer.At()
-				if tok != nil {
-					str := string(tok)
-					_, found := bt.cache[str] // A cache is used ahead of the SBF, as it cuts out the costly operations of scaling bloom filters
-					if !found {
-						bt.cache[str] = nil
-
-						seriesWithBloom.Bloom.ScalableBloomFilter.TestAndAdd(tok)
-
-						if len(bt.cache) >= cacheSize { // While crude, this has proven efficient in performance testing. This speaks to the similarity in log lines near each other
-							clearCache(bt.cache)
-						}
-					}
-				}
-			}
-		}
-
-		// Close each chunk's iterator before moving on rather than deferring,
-		// so open iterators don't accumulate for the duration of the loop.
-		if err := itr.Close(); err != nil {
-			level.Error(util_log.Logger).Log("msg", "error closing chunk iterator", "err", err)
-		}
-
-		seriesWithBloom.Series.Chunks = append(seriesWithBloom.Series.Chunks, v1.ChunkRef{
-			From:     chunks[idx].From,
-			Through:  chunks[idx].Through,
-			Checksum: chunks[idx].Checksum,
-		})
-	} // for each chunk
-
-	endTime := time.Now().UnixMilli()
-
-	fillRatio := seriesWithBloom.Bloom.ScalableBloomFilter.FillRatio()
-	bt.metrics.hammingWeightRatio.Observe(fillRatio)
-	bt.metrics.estimatedCount.Observe(
-		float64(estimatedCount(seriesWithBloom.Bloom.ScalableBloomFilter.Capacity(), fillRatio)),
-	)
-	bt.metrics.bloomSize.Observe(float64(seriesWithBloom.Bloom.ScalableBloomFilter.Capacity() / eightBits))
-	bt.metrics.sbfCreationTime.Add(float64(endTime - startTime))
-	bt.metrics.chunkSize.Observe(float64(chunkTotalUncompressedSize))
-	return nil
-}
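A condensed sketch of the dedup strategy used by `PopulateSeriesWithBloom`: a bounded map in front of the filter suppresses repeated tokens, which are common in adjacent log lines, and is cleared wholesale when full. The `addFn` callback stands in for `ScalableBloomFilter.TestAndAdd`:

```go
package main

import "fmt"

// populate feeds tokens to addFn, skipping any token already seen in the
// bounded cache; the cache plays the role of bt.cache above.
func populate(tokens []string, cacheSize int, addFn func(string)) {
	cache := make(map[string]struct{}, cacheSize)
	for _, tok := range tokens {
		if _, seen := cache[tok]; seen {
			continue // nearby log lines repeat tokens; skip the filter entirely
		}
		cache[tok] = struct{}{}
		addFn(tok)
		if len(cache) >= cacheSize {
			clear(cache) // crude but effective reset, mirroring clearCache above
		}
	}
}

func main() {
	added := 0
	populate([]string{"trac", "race", "trac", "ace "}, 3, func(string) { added++ })
	fmt.Println("filter insertions:", added) // 3, not 4: the duplicate "trac" was cached
}
```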